package database

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	_ "github.com/go-sql-driver/mysql"
	"github.com/luoshanzhi/monster-go"
	"math/rand"
	"reflect"
	"strconv"
	"strings"
	"time"
)

var masters []*sql.DB
var slaves []*sql.DB

var pick = func(dbType string, dbs []*sql.DB) (*sql.DB, error) {
	if dbType != "master" && dbType != "slave" {
		return nil, errors.New("dbType error")
	}
	if len(dbs) == 0 {
		return nil, errors.New("dbs len is 0")
	}
	var maxCan = -1
	var maxCanDB *sql.DB
	uniMap := make(map[int]bool)
	for _, item := range dbs {
		stats := item.Stats()
		maxOpenConnections := stats.MaxOpenConnections
		inUse := stats.InUse
		can := maxOpenConnections - inUse
		uniMap[can] = true
		if maxCan == -1 || can > maxCan {
			maxCan = can
			maxCanDB = item
		}
	}
	if len(uniMap) == 1 {
		// all candidates have equal headroom: pick one at random
		rand.Seed(time.Now().UnixNano())
		return dbs[rand.Intn(len(dbs))], nil
	}
	return maxCanDB, nil
}

func SetPick(pk func(dbType string, dbs []*sql.DB) (*sql.DB, error)) {
	if pk != nil {
		pick = pk
	}
}

func Open(options Options) {
	OpenMaster(options)
	OpenSlave(options)
}

func OpenMaster(options Options) { BaseOpen("master", options) }

func OpenSlave(options Options) { BaseOpen("slave", options) }

func Close() {
	CloseMaster()
	CloseSlave()
}

func CloseMaster() { BaseClose("master") }

func CloseSlave() { BaseClose("slave") }

func BaseOpen(dbType string, options Options) {
	if dbType != "master" && dbType != "slave" {
		panic("dbType error")
	}
	mysqlConfig := monster.CurEnvConfig.Mysql
	var dbArr []monster.MysqlSettingItem
	if dbType == "master" {
		dbArr = mysqlConfig.Master
	} else if dbType == "slave" {
		dbArr = mysqlConfig.Slave
	}
	if len(dbArr) == 0 {
		panic("config error")
	}
	var dbs []*sql.DB
	for _, item := range dbArr {
		host := item.Host
		user := item.User
		password := item.Password
		dBase := item.Database
		port := item.Port
		charset := strings.TrimSpace(options.Charset)
		interpolateParams := ""
		if charset == "" {
			charset = "utf8"
		}
		if options.InterpolateParams {
			interpolateParams = "&interpolateParams=true"
		}
		db, err := sql.Open("mysql", user+":"+password+"@tcp("+host+":"+strconv.Itoa(port)+")/"+dBase+"?charset="+charset+interpolateParams)
		if err != nil {
			panic(err)
		}
		// a value <= 0 means the connection lifetime is unlimited
		db.SetConnMaxLifetime(options.ConnMaxLifetime)
		db.SetMaxOpenConns(options.MaxOpenConns)
		db.SetMaxIdleConns(options.MaxIdleConns)
		err = db.Ping()
		if err != nil {
			panic(err)
		}
		dbs = append(dbs, db)
	}
	if dbType == "master" {
		masters = dbs
		monster.CommonLog.Info("database: masters started successfully")
	} else if dbType == "slave" {
		slaves = dbs
		monster.CommonLog.Info("database: slaves started successfully")
	}
	if options.StatisticsLog {
		statisticsLogDuration := options.StatisticsLogDuration
		if statisticsLogDuration <= 0 {
			statisticsLogDuration = time.Second * 5
		}
		go func() {
			for {
				stats, err := BaseStats(dbType)
				if err != nil {
					return
				}
				monster.StatisticsLog.
					WithField("name", "database-"+dbType).
					WithField("use", stats.Use).
					WithField("idle", stats.Idle).
					Info()
				time.Sleep(statisticsLogDuration)
			}
		}()
	}
}

func BaseClose(dbType string) {
	if dbType != "master" && dbType != "slave" {
		panic("dbType error")
	}
	var dbs []*sql.DB
	if dbType == "master" {
		dbs = masters
	} else if dbType == "slave" {
		dbs = slaves
	}
	for _, db := range dbs {
		db.Close()
	}
	if dbType == "master" {
		masters = nil
	} else if dbType == "slave" {
		slaves = nil
	}
}

func Stats() (Statistics, error) {
	var statistics Statistics
	masterStats, masterErr := MasterStats()
	if masterErr != nil {
		return statistics, masterErr
	}
	slaveStats, slaveErr := SlaveStats()
	if slaveErr != nil {
		return statistics, slaveErr
	}
	statistics.Use += masterStats.Use
	statistics.Idle += masterStats.Idle
	statistics.Use += slaveStats.Use
	statistics.Idle += slaveStats.Idle
	return statistics, nil
}

func MasterStats() (Statistics, error) { return BaseStats("master") }

func SlaveStats() (Statistics, error) { return BaseStats("slave") }

func BaseStats(dbType string) (Statistics, error) {
	var statistics Statistics
	var dbs []*sql.DB
	if dbType == "master" {
		dbs = masters
	} else if dbType == "slave" {
		dbs = slaves
	}
	if len(dbs) == 0 {
		return statistics, errors.New("dbs len is 0")
	}
	for _, db := range dbs {
		stats := db.Stats()
		statistics.Use += stats.InUse
		statistics.Idle += stats.Idle
	}
	return statistics, nil
}

func DB() *sql.DB { return Master() }

func Master() *sql.DB {
	db, _ := pick("master", masters)
	return db
}

func Slave() *sql.DB {
	db, _ := pick("slave", slaves)
	return db
}

func Query(handler Handler, col interface{}, query string, args ...interface{}) error {
	return QueryContext(context.Background(), handler, col, query, args...)
}

func QueryRow(handler Handler, col interface{}, query string, args ...interface{}) error {
	return QueryRowContext(context.Background(), handler, col, query, args...)
}

func Exec(handler Handler, query string, args ...interface{}) (sql.Result, error) {
	return ExecContext(context.Background(), handler, query, args...)
}

func Prepare(handler Handler, query string) (*sql.Stmt, error) {
	return PrepareContext(context.Background(), handler, query)
}

func QueryContext(ctx context.Context, handler Handler, col interface{}, query string, args ...interface{}) error {
	if handler == nil {
		return errors.New("handler is nil")
	}
	colValueElem, colItemType, colItemTagMap, reflectErr := colReflect(col)
	if reflectErr != nil {
		return reflectErr
	}
	monster.CommonLog.Trace("sql("+fmt.Sprintf("%p", handler)+"):", query)
	rows, err := handler.QueryContext(ctx, query, args...)
	if err != nil {
		return err
	}
	defer rows.Close()
	scan_, scanErr := rowsScan(rows, colItemType, colItemTagMap)
	if scanErr != nil {
		return scanErr
	}
	for rows.Next() {
		appendErr := colAppend(rows, &scan_, colValueElem, colItemType)
		if appendErr != nil {
			return appendErr
		}
	}
	return nil
}

func QueryRowContext(ctx context.Context, handler Handler, col interface{}, query string, args ...interface{}) error {
	if handler == nil {
		return errors.New("handler is nil")
	}
	colValueElem, colItemType, colItemTagMap, reflectErr := colReflect(col)
	if reflectErr != nil {
		return reflectErr
	}
	monster.CommonLog.Trace("sql("+fmt.Sprintf("%p", handler)+"):", query)
	rows, err := handler.QueryContext(ctx, query, args...)
	if err != nil {
		return err
	}
	defer rows.Close()
	scan_, scanErr := rowsScan(rows, colItemType, colItemTagMap)
	if scanErr != nil {
		return scanErr
	}
	if rows.Next() {
		appendErr := colAppend(rows, &scan_, colValueElem, colItemType)
		if appendErr != nil {
			return appendErr
		}
	} else {
		return errors.New("not exists")
	}
	return nil
}

func ExecContext(ctx context.Context, handler Handler, query string, args ...interface{}) (sql.Result, error) {
	if handler == nil {
		return nil, errors.New("handler is nil")
	}
	monster.CommonLog.Trace("sql("+fmt.Sprintf("%p", handler)+"):", query)
	return handler.ExecContext(ctx, query, args...)
}

func PrepareContext(ctx context.Context, handler Handler, query string) (*sql.Stmt, error) {
	monster.CommonLog.Trace("sql("+fmt.Sprintf("%p", handler)+"):", query)
	return handler.PrepareContext(ctx, query)
}

func StmtQueryContext(ctx context.Context, stmt *sql.Stmt, col interface{}, args ...interface{}) error {
	if stmt == nil {
		return errors.New("stmt is nil")
	}
	colValueElem, colItemType, colItemTagMap, reflectErr := colReflect(col)
	if reflectErr != nil {
		return reflectErr
	}
	rows, err := stmt.QueryContext(ctx, args...)
	if err != nil {
		return err
	}
	defer rows.Close()
	scan_, scanErr := rowsScan(rows, colItemType, colItemTagMap)
	if scanErr != nil {
		return scanErr
	}
	for rows.Next() {
		appendErr := colAppend(rows, &scan_, colValueElem, colItemType)
		if appendErr != nil {
			return appendErr
		}
	}
	return nil
}

func StmtQueryRowContext(ctx context.Context, stmt *sql.Stmt, col interface{}, args ...interface{}) error {
	if stmt == nil {
		return errors.New("stmt is nil")
	}
	colValueElem, colItemType, colItemTagMap, reflectErr := colReflect(col)
	if reflectErr != nil {
		return reflectErr
	}
	rows, err := stmt.QueryContext(ctx, args...)
	if err != nil {
		return err
	}
	defer rows.Close()
	scan_, scanErr := rowsScan(rows, colItemType, colItemTagMap)
	if scanErr != nil {
		return scanErr
	}
	if rows.Next() {
		appendErr := colAppend(rows, &scan_, colValueElem, colItemType)
		if appendErr != nil {
			return appendErr
		}
	} else {
		return errors.New("not exists")
	}
	return nil
}

func StmtExecContext(ctx context.Context, stmt *sql.Stmt, args ...interface{}) (sql.Result, error) {
	return stmt.ExecContext(ctx, args...)
}

func colReflect(col interface{}) (colValueElem reflect.Value, colItemType reflect.Type, colItemTagMap map[string]string, err error) {
	colValue := reflect.ValueOf(col)
	if colValue.Kind() != reflect.Ptr {
		err = errors.New("col is not ptr")
		return
	}
	if colValue.IsNil() {
		err = errors.New("col is nil")
		return
	}
	colValueElem = colValue.Elem()
	colItemType = colValueElem.Type()
	if colItemType.Kind() == reflect.Slice {
		// get the item type inside the slice
		colItemType = colItemType.Elem()
	}
	if colItemType.Kind() != reflect.Struct {
		err = errors.New("colItem is not struct")
		return
	}
	colItemTagMap = make(map[string]string)
	colItemNumField := colItemType.NumField()
	for i := 0; i < colItemNumField; i++ {
		field := colItemType.Field(i)
		name := field.Name
		val := field.Tag.Get("db")
		if val == "" {
			continue
		}
		colItemTagMap[val] = name
	}
	return
}

func rowsScan(rows *sql.Rows, colItemType reflect.Type, colItemTagMap map[string]string) (scan, error) {
	var scan_ scan
	columns, columnErr := rows.Columns()
	if columnErr != nil {
		return scan_, columnErr
	}
	length := len(columns)
	dest := make([]interface{}, length)
	column := make([]string, length)
	for i, item := range columns {
		if name, ok := colItemTagMap[item]; ok {
			item = name
		} else {
			// no db tag set: look the field up by its upper-cased first letter
			item = monster.FirstUpper(item)
		}
		field, ok := colItemType.FieldByName(item)
		if !ok {
			return scan_, errors.New(item + " is not in col")
		}
		var addr interface{}
		// use sql.Null* wrappers so NULL database values do not break scanning
		switch field.Type.Kind() {
		case reflect.String:
			addr = &sql.NullString{}
		case reflect.Int, reflect.Int64:
			addr = &sql.NullInt64{}
		case reflect.Int32:
			addr = &sql.NullInt32{}
		case reflect.Int16:
			addr = &sql.NullInt16{}
		case reflect.Float32, reflect.Float64:
			addr = &sql.NullFloat64{}
		case reflect.Bool:
			addr = &sql.NullBool{}
		case reflect.Struct:
			if field.Type == reflect.TypeOf((*time.Time)(nil)).Elem() {
				addr = &sql.NullTime{}
			}
		case reflect.Uint8:
			addr = &sql.NullByte{}
		default:
			return scan_, errors.New("colField type error")
		}
		dest[i] = addr
		column[i] = item
	}
	scan_.dest = dest
	scan_.column = column
	return scan_, nil
}

func colAppend(rows *sql.Rows, scan_ *scan, colValueElem reflect.Value, colItemType reflect.Type) error {
	err := rows.Scan(scan_.dest...)
	if err != nil {
		return err
	}
	newValue := reflect.New(colItemType).Elem()
	for i, item := range scan_.column {
		colField := newValue.FieldByName(item)
		switch obj := scan_.dest[i].(type) {
		case *sql.NullString:
			colField.SetString(obj.String)
		case *sql.NullInt64:
			colField.SetInt(obj.Int64)
		case *sql.NullInt32:
			colField.SetInt(int64(obj.Int32))
		case *sql.NullInt16:
			colField.SetInt(int64(obj.Int16))
		case *sql.NullFloat64:
			colField.SetFloat(obj.Float64)
		case *sql.NullBool:
			colField.SetBool(obj.Bool)
		case *sql.NullTime:
			colField.Set(reflect.ValueOf(obj.Time))
		case *sql.NullByte:
			colField.Set(reflect.ValueOf(obj.Byte))
		}
	}
	if colValueElem.Kind() == reflect.Slice {
		colValueElem.Set(reflect.Append(colValueElem, newValue))
	} else {
		colValueElem.Set(newValue)
	}
	return nil
}
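Putting the pieces together: Open reads master/slave settings from monster.CurEnvConfig, pick load-balances reads across replicas by connection headroom, and Query scans rows into a db-tagged struct slice via reflection. A hedged usage sketch follows; the import path, table, and User struct are illustrative, the Options fields are the ones BaseOpen references, and it assumes *sql.DB satisfies the package's Handler interface (its method set suggests it does):

package main

import (
	"fmt"
	"time"

	"yourmodule/database" // hypothetical import path for the package above
)

type User struct {
	Id   int    `db:"id"`
	Name string `db:"name"`
}

func main() {
	// Connection settings; only fields referenced by BaseOpen are set here.
	database.Open(database.Options{
		Charset:         "utf8mb4",
		ConnMaxLifetime: time.Hour,
		MaxOpenConns:    50,
		MaxIdleConns:    10,
	})
	defer database.Close()

	// Reads go through a slave chosen by pick; writes go to a master.
	var users []User
	if err := database.Query(database.Slave(), &users, "SELECT id, name FROM user WHERE id > ?", 100); err != nil {
		fmt.Println("query failed:", err)
		return
	}
	_, _ = database.Exec(database.Master(), "UPDATE user SET name = ? WHERE id = ?", "bob", 1)
}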
Change detection algorithm for land cover in grid systems Natural phenomena, like seasonal change, volcanic eruptions, or flooding, and even non-natural ones, like human intervention, may lead to a completely new image of the Earth. One thing is certain: the Earth's surface is permanently changing, and the need for disaster prevention is immediate. In this context, one of the main challenges is to provide fast and accurate information; imagine petabytes of data coming from satellites every day. Data processing has to be both fast and accurate in order to respond to any disaster. In this paper we analyze the advantages of a distributed change detection algorithm in terms of speedup.
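For reference, the speedup by which such a distributed design is judged is the ratio of single-node to n-node processing time, with efficiency as speedup per node. A minimal sketch in Go; the timing values are placeholders, not measurements from the paper:

package main

import "fmt"

func main() {
	// Placeholder wall-clock times for processing one batch of satellite tiles;
	// illustrative values only, not results from the paper.
	serialSeconds := 3600.0  // 1 node
	parallelSeconds := 520.0 // 8 nodes
	nodes := 8.0

	speedup := serialSeconds / parallelSeconds
	efficiency := speedup / nodes // 1.0 would be perfect linear scaling
	fmt.Printf("speedup %.2fx, efficiency %.0f%%\n", speedup, efficiency*100)
}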
Something I used to do all too often was to let a recurring problem go unsolved. And I know I'm not the only one. Most of us do it. Each time, we're forced to deal with the small emergency that results, at a cost in time, annoyance, and even money. The fix: figure out some foolproof way to keep the thing from happening the next time, and start doing it that way now.

Keep losing your glasses? Decide on where to put them – say, a particular spot on your desk – so that you'll always be able to find them. Never put them anywhere else.

Lock your keys in your car? Before you lock your car door, visually verify that your keys are either in your hand or in your pocket. Never do it any other way. Or: go down to the dealer and have a spare key made. Put it in your purse or billfold or pocket. Never leave the house without it.

Forget to pay the phone bill? The day the bill comes in the mail, sit down and write out a check, slip it into the envelope, slap a stamp on it, and put it where you won't forget to mail it. Never vary from the practice. Or: put all received bills in one specific tray on your desk. Pay them ALL each Tuesday, whether due or not. Never change the habit.

The power of Solve It Once is that the solution quickly moves from the conscious part of your brain to the unconscious, making it automatic, painless, and easy.
// Copyright (c) 2004-present Facebook All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Code generated by entc, DO NOT EDIT. package ent import ( "context" "database/sql/driver" "errors" "fmt" "math" "github.com/facebook/ent/dialect/sql" "github.com/facebook/ent/dialect/sql/sqlgraph" "github.com/facebook/ent/schema/field" "github.com/facebookincubator/symphony/pkg/ent/parametercatalog" "github.com/facebookincubator/symphony/pkg/ent/predicate" "github.com/facebookincubator/symphony/pkg/ent/propertycategory" ) // ParameterCatalogQuery is the builder for querying ParameterCatalog entities. type ParameterCatalogQuery struct { config limit *int offset *int order []OrderFunc unique []string predicates []predicate.ParameterCatalog // eager-loading edges. withPropertyCategories *PropertyCategoryQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) } // Where adds a new predicate for the builder. func (pcq *ParameterCatalogQuery) Where(ps ...predicate.ParameterCatalog) *ParameterCatalogQuery { pcq.predicates = append(pcq.predicates, ps...) return pcq } // Limit adds a limit step to the query. func (pcq *ParameterCatalogQuery) Limit(limit int) *ParameterCatalogQuery { pcq.limit = &limit return pcq } // Offset adds an offset step to the query. func (pcq *ParameterCatalogQuery) Offset(offset int) *ParameterCatalogQuery { pcq.offset = &offset return pcq } // Order adds an order step to the query. func (pcq *ParameterCatalogQuery) Order(o ...OrderFunc) *ParameterCatalogQuery { pcq.order = append(pcq.order, o...) return pcq } // QueryPropertyCategories chains the current query on the property_categories edge. func (pcq *ParameterCatalogQuery) QueryPropertyCategories() *PropertyCategoryQuery { query := &PropertyCategoryQuery{config: pcq.config} query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pcq.prepareQuery(ctx); err != nil { return nil, err } selector := pcq.sqlQuery() if err := selector.Err(); err != nil { return nil, err } step := sqlgraph.NewStep( sqlgraph.From(parametercatalog.Table, parametercatalog.FieldID, selector), sqlgraph.To(propertycategory.Table, propertycategory.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, parametercatalog.PropertyCategoriesTable, parametercatalog.PropertyCategoriesColumn), ) fromU = sqlgraph.SetNeighbors(pcq.driver.Dialect(), step) return fromU, nil } return query } // First returns the first ParameterCatalog entity in the query. Returns *NotFoundError when no parametercatalog was found. func (pcq *ParameterCatalogQuery) First(ctx context.Context) (*ParameterCatalog, error) { nodes, err := pcq.Limit(1).All(ctx) if err != nil { return nil, err } if len(nodes) == 0 { return nil, &NotFoundError{parametercatalog.Label} } return nodes[0], nil } // FirstX is like First, but panics if an error occurs. func (pcq *ParameterCatalogQuery) FirstX(ctx context.Context) *ParameterCatalog { node, err := pcq.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } return node } // FirstID returns the first ParameterCatalog id in the query. Returns *NotFoundError when no id was found. func (pcq *ParameterCatalogQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int if ids, err = pcq.Limit(1).IDs(ctx); err != nil { return } if len(ids) == 0 { err = &NotFoundError{parametercatalog.Label} return } return ids[0], nil } // FirstIDX is like FirstID, but panics if an error occurs. 
func (pcq *ParameterCatalogQuery) FirstIDX(ctx context.Context) int { id, err := pcq.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } return id } // Only returns the only ParameterCatalog entity in the query, returns an error if not exactly one entity was returned. func (pcq *ParameterCatalogQuery) Only(ctx context.Context) (*ParameterCatalog, error) { nodes, err := pcq.Limit(2).All(ctx) if err != nil { return nil, err } switch len(nodes) { case 1: return nodes[0], nil case 0: return nil, &NotFoundError{parametercatalog.Label} default: return nil, &NotSingularError{parametercatalog.Label} } } // OnlyX is like Only, but panics if an error occurs. func (pcq *ParameterCatalogQuery) OnlyX(ctx context.Context) *ParameterCatalog { node, err := pcq.Only(ctx) if err != nil { panic(err) } return node } // OnlyID returns the only ParameterCatalog id in the query, returns an error if not exactly one id was returned. func (pcq *ParameterCatalogQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int if ids, err = pcq.Limit(2).IDs(ctx); err != nil { return } switch len(ids) { case 1: id = ids[0] case 0: err = &NotFoundError{parametercatalog.Label} default: err = &NotSingularError{parametercatalog.Label} } return } // OnlyIDX is like OnlyID, but panics if an error occurs. func (pcq *ParameterCatalogQuery) OnlyIDX(ctx context.Context) int { id, err := pcq.OnlyID(ctx) if err != nil { panic(err) } return id } // All executes the query and returns a list of ParameterCatalogs. func (pcq *ParameterCatalogQuery) All(ctx context.Context) ([]*ParameterCatalog, error) { if err := pcq.prepareQuery(ctx); err != nil { return nil, err } return pcq.sqlAll(ctx) } // AllX is like All, but panics if an error occurs. func (pcq *ParameterCatalogQuery) AllX(ctx context.Context) []*ParameterCatalog { nodes, err := pcq.All(ctx) if err != nil { panic(err) } return nodes } // IDs executes the query and returns a list of ParameterCatalog ids. func (pcq *ParameterCatalogQuery) IDs(ctx context.Context) ([]int, error) { var ids []int if err := pcq.Select(parametercatalog.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. func (pcq *ParameterCatalogQuery) IDsX(ctx context.Context) []int { ids, err := pcq.IDs(ctx) if err != nil { panic(err) } return ids } // Count returns the count of the given query. func (pcq *ParameterCatalogQuery) Count(ctx context.Context) (int, error) { if err := pcq.prepareQuery(ctx); err != nil { return 0, err } return pcq.sqlCount(ctx) } // CountX is like Count, but panics if an error occurs. func (pcq *ParameterCatalogQuery) CountX(ctx context.Context) int { count, err := pcq.Count(ctx) if err != nil { panic(err) } return count } // Exist returns true if the query has elements in the graph. func (pcq *ParameterCatalogQuery) Exist(ctx context.Context) (bool, error) { if err := pcq.prepareQuery(ctx); err != nil { return false, err } return pcq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. func (pcq *ParameterCatalogQuery) ExistX(ctx context.Context) bool { exist, err := pcq.Exist(ctx) if err != nil { panic(err) } return exist } // Clone returns a duplicate of the query builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. 
func (pcq *ParameterCatalogQuery) Clone() *ParameterCatalogQuery { if pcq == nil { return nil } return &ParameterCatalogQuery{ config: pcq.config, limit: pcq.limit, offset: pcq.offset, order: append([]OrderFunc{}, pcq.order...), unique: append([]string{}, pcq.unique...), predicates: append([]predicate.ParameterCatalog{}, pcq.predicates...), withPropertyCategories: pcq.withPropertyCategories.Clone(), // clone intermediate query. sql: pcq.sql.Clone(), path: pcq.path, } } // WithPropertyCategories tells the query-builder to eager-loads the nodes that are connected to // the "property_categories" edge. The optional arguments used to configure the query builder of the edge. func (pcq *ParameterCatalogQuery) WithPropertyCategories(opts ...func(*PropertyCategoryQuery)) *ParameterCatalogQuery { query := &PropertyCategoryQuery{config: pcq.config} for _, opt := range opts { opt(query) } pcq.withPropertyCategories = query return pcq } // GroupBy used to group vertices by one or more fields/columns. // It is often used with aggregate functions, like: count, max, mean, min, sum. // // Example: // // var v []struct { // CreateTime time.Time `json:"create_time,omitempty"` // Count int `json:"count,omitempty"` // } // // client.ParameterCatalog.Query(). // GroupBy(parametercatalog.FieldCreateTime). // Aggregate(ent.Count()). // Scan(ctx, &v) // func (pcq *ParameterCatalogQuery) GroupBy(field string, fields ...string) *ParameterCatalogGroupBy { group := &ParameterCatalogGroupBy{config: pcq.config} group.fields = append([]string{field}, fields...) group.path = func(ctx context.Context) (prev *sql.Selector, err error) { if err := pcq.prepareQuery(ctx); err != nil { return nil, err } return pcq.sqlQuery(), nil } return group } // Select one or more fields from the given query. // // Example: // // var v []struct { // CreateTime time.Time `json:"create_time,omitempty"` // } // // client.ParameterCatalog.Query(). // Select(parametercatalog.FieldCreateTime). // Scan(ctx, &v) // func (pcq *ParameterCatalogQuery) Select(field string, fields ...string) *ParameterCatalogSelect { selector := &ParameterCatalogSelect{config: pcq.config} selector.fields = append([]string{field}, fields...) selector.path = func(ctx context.Context) (prev *sql.Selector, err error) { if err := pcq.prepareQuery(ctx); err != nil { return nil, err } return pcq.sqlQuery(), nil } return selector } func (pcq *ParameterCatalogQuery) prepareQuery(ctx context.Context) error { if pcq.path != nil { prev, err := pcq.path(ctx) if err != nil { return err } pcq.sql = prev } if err := parametercatalog.Policy.EvalQuery(ctx, pcq); err != nil { return err } return nil } func (pcq *ParameterCatalogQuery) sqlAll(ctx context.Context) ([]*ParameterCatalog, error) { var ( nodes = []*ParameterCatalog{} _spec = pcq.querySpec() loadedTypes = [1]bool{ pcq.withPropertyCategories != nil, } ) _spec.ScanValues = func() []interface{} { node := &ParameterCatalog{config: pcq.config} nodes = append(nodes, node) values := node.scanValues() return values } _spec.Assign = func(values ...interface{}) error { if len(nodes) == 0 { return fmt.Errorf("ent: Assign called without calling ScanValues") } node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(values...) 
} if err := sqlgraph.QueryNodes(ctx, pcq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } if query := pcq.withPropertyCategories; query != nil { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[int]*ParameterCatalog) for i := range nodes { fks = append(fks, nodes[i].ID) nodeids[nodes[i].ID] = nodes[i] nodes[i].Edges.PropertyCategories = []*PropertyCategory{} } query.withFKs = true query.Where(predicate.PropertyCategory(func(s *sql.Selector) { s.Where(sql.InValues(parametercatalog.PropertyCategoriesColumn, fks...)) })) neighbors, err := query.All(ctx) if err != nil { return nil, err } for _, n := range neighbors { fk := n.parameter_catalog_property_categories if fk == nil { return nil, fmt.Errorf(`foreign-key "parameter_catalog_property_categories" is nil for node %v`, n.ID) } node, ok := nodeids[*fk] if !ok { return nil, fmt.Errorf(`unexpected foreign-key "parameter_catalog_property_categories" returned %v for node %v`, *fk, n.ID) } node.Edges.PropertyCategories = append(node.Edges.PropertyCategories, n) } } return nodes, nil } func (pcq *ParameterCatalogQuery) sqlCount(ctx context.Context) (int, error) { _spec := pcq.querySpec() return sqlgraph.CountNodes(ctx, pcq.driver, _spec) } func (pcq *ParameterCatalogQuery) sqlExist(ctx context.Context) (bool, error) { n, err := pcq.sqlCount(ctx) if err != nil { return false, fmt.Errorf("ent: check existence: %v", err) } return n > 0, nil } func (pcq *ParameterCatalogQuery) querySpec() *sqlgraph.QuerySpec { _spec := &sqlgraph.QuerySpec{ Node: &sqlgraph.NodeSpec{ Table: parametercatalog.Table, Columns: parametercatalog.Columns, ID: &sqlgraph.FieldSpec{ Type: field.TypeInt, Column: parametercatalog.FieldID, }, }, From: pcq.sql, Unique: true, } if ps := pcq.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } if limit := pcq.limit; limit != nil { _spec.Limit = *limit } if offset := pcq.offset; offset != nil { _spec.Offset = *offset } if ps := pcq.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector, parametercatalog.ValidColumn) } } } return _spec } func (pcq *ParameterCatalogQuery) sqlQuery() *sql.Selector { builder := sql.Dialect(pcq.driver.Dialect()) t1 := builder.Table(parametercatalog.Table) selector := builder.Select(t1.Columns(parametercatalog.Columns...)...).From(t1) if pcq.sql != nil { selector = pcq.sql selector.Select(selector.Columns(parametercatalog.Columns...)...) } for _, p := range pcq.predicates { p(selector) } for _, p := range pcq.order { p(selector, parametercatalog.ValidColumn) } if offset := pcq.offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } if limit := pcq.limit; limit != nil { selector.Limit(*limit) } return selector } // ParameterCatalogGroupBy is the builder for group-by ParameterCatalog entities. type ParameterCatalogGroupBy struct { config fields []string fns []AggregateFunc // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) } // Aggregate adds the given aggregation functions to the group-by query. func (pcgb *ParameterCatalogGroupBy) Aggregate(fns ...AggregateFunc) *ParameterCatalogGroupBy { pcgb.fns = append(pcgb.fns, fns...) return pcgb } // Scan applies the group-by query and scan the result into the given value. 
func (pcgb *ParameterCatalogGroupBy) Scan(ctx context.Context, v interface{}) error { query, err := pcgb.path(ctx) if err != nil { return err } pcgb.sql = query return pcgb.sqlScan(ctx, v) } // ScanX is like Scan, but panics if an error occurs. func (pcgb *ParameterCatalogGroupBy) ScanX(ctx context.Context, v interface{}) { if err := pcgb.Scan(ctx, v); err != nil { panic(err) } } // Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. func (pcgb *ParameterCatalogGroupBy) Strings(ctx context.Context) ([]string, error) { if len(pcgb.fields) > 1 { return nil, errors.New("ent: ParameterCatalogGroupBy.Strings is not achievable when grouping more than 1 field") } var v []string if err := pcgb.Scan(ctx, &v); err != nil { return nil, err } return v, nil } // StringsX is like Strings, but panics if an error occurs. func (pcgb *ParameterCatalogGroupBy) StringsX(ctx context.Context) []string { v, err := pcgb.Strings(ctx) if err != nil { panic(err) } return v } // String returns a single string from group-by. It is only allowed when querying group-by with one field. func (pcgb *ParameterCatalogGroupBy) String(ctx context.Context) (_ string, err error) { var v []string if v, err = pcgb.Strings(ctx); err != nil { return } switch len(v) { case 1: return v[0], nil case 0: err = &NotFoundError{parametercatalog.Label} default: err = fmt.Errorf("ent: ParameterCatalogGroupBy.Strings returned %d results when one was expected", len(v)) } return } // StringX is like String, but panics if an error occurs. func (pcgb *ParameterCatalogGroupBy) StringX(ctx context.Context) string { v, err := pcgb.String(ctx) if err != nil { panic(err) } return v } // Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. func (pcgb *ParameterCatalogGroupBy) Ints(ctx context.Context) ([]int, error) { if len(pcgb.fields) > 1 { return nil, errors.New("ent: ParameterCatalogGroupBy.Ints is not achievable when grouping more than 1 field") } var v []int if err := pcgb.Scan(ctx, &v); err != nil { return nil, err } return v, nil } // IntsX is like Ints, but panics if an error occurs. func (pcgb *ParameterCatalogGroupBy) IntsX(ctx context.Context) []int { v, err := pcgb.Ints(ctx) if err != nil { panic(err) } return v } // Int returns a single int from group-by. It is only allowed when querying group-by with one field. func (pcgb *ParameterCatalogGroupBy) Int(ctx context.Context) (_ int, err error) { var v []int if v, err = pcgb.Ints(ctx); err != nil { return } switch len(v) { case 1: return v[0], nil case 0: err = &NotFoundError{parametercatalog.Label} default: err = fmt.Errorf("ent: ParameterCatalogGroupBy.Ints returned %d results when one was expected", len(v)) } return } // IntX is like Int, but panics if an error occurs. func (pcgb *ParameterCatalogGroupBy) IntX(ctx context.Context) int { v, err := pcgb.Int(ctx) if err != nil { panic(err) } return v } // Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. func (pcgb *ParameterCatalogGroupBy) Float64s(ctx context.Context) ([]float64, error) { if len(pcgb.fields) > 1 { return nil, errors.New("ent: ParameterCatalogGroupBy.Float64s is not achievable when grouping more than 1 field") } var v []float64 if err := pcgb.Scan(ctx, &v); err != nil { return nil, err } return v, nil } // Float64sX is like Float64s, but panics if an error occurs. 
func (pcgb *ParameterCatalogGroupBy) Float64sX(ctx context.Context) []float64 { v, err := pcgb.Float64s(ctx) if err != nil { panic(err) } return v } // Float64 returns a single float64 from group-by. It is only allowed when querying group-by with one field. func (pcgb *ParameterCatalogGroupBy) Float64(ctx context.Context) (_ float64, err error) { var v []float64 if v, err = pcgb.Float64s(ctx); err != nil { return } switch len(v) { case 1: return v[0], nil case 0: err = &NotFoundError{parametercatalog.Label} default: err = fmt.Errorf("ent: ParameterCatalogGroupBy.Float64s returned %d results when one was expected", len(v)) } return } // Float64X is like Float64, but panics if an error occurs. func (pcgb *ParameterCatalogGroupBy) Float64X(ctx context.Context) float64 { v, err := pcgb.Float64(ctx) if err != nil { panic(err) } return v } // Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. func (pcgb *ParameterCatalogGroupBy) Bools(ctx context.Context) ([]bool, error) { if len(pcgb.fields) > 1 { return nil, errors.New("ent: ParameterCatalogGroupBy.Bools is not achievable when grouping more than 1 field") } var v []bool if err := pcgb.Scan(ctx, &v); err != nil { return nil, err } return v, nil } // BoolsX is like Bools, but panics if an error occurs. func (pcgb *ParameterCatalogGroupBy) BoolsX(ctx context.Context) []bool { v, err := pcgb.Bools(ctx) if err != nil { panic(err) } return v } // Bool returns a single bool from group-by. It is only allowed when querying group-by with one field. func (pcgb *ParameterCatalogGroupBy) Bool(ctx context.Context) (_ bool, err error) { var v []bool if v, err = pcgb.Bools(ctx); err != nil { return } switch len(v) { case 1: return v[0], nil case 0: err = &NotFoundError{parametercatalog.Label} default: err = fmt.Errorf("ent: ParameterCatalogGroupBy.Bools returned %d results when one was expected", len(v)) } return } // BoolX is like Bool, but panics if an error occurs. func (pcgb *ParameterCatalogGroupBy) BoolX(ctx context.Context) bool { v, err := pcgb.Bool(ctx) if err != nil { panic(err) } return v } func (pcgb *ParameterCatalogGroupBy) sqlScan(ctx context.Context, v interface{}) error { for _, f := range pcgb.fields { if !parametercatalog.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} } } selector := pcgb.sqlQuery() if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() if err := pcgb.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } func (pcgb *ParameterCatalogGroupBy) sqlQuery() *sql.Selector { selector := pcgb.sql columns := make([]string, 0, len(pcgb.fields)+len(pcgb.fns)) columns = append(columns, pcgb.fields...) for _, fn := range pcgb.fns { columns = append(columns, fn(selector, parametercatalog.ValidColumn)) } return selector.Select(columns...).GroupBy(pcgb.fields...) } // ParameterCatalogSelect is the builder for select fields of ParameterCatalog entities. type ParameterCatalogSelect struct { config fields []string // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) } // Scan applies the selector query and scan the result into the given value. 
func (pcs *ParameterCatalogSelect) Scan(ctx context.Context, v interface{}) error { query, err := pcs.path(ctx) if err != nil { return err } pcs.sql = query return pcs.sqlScan(ctx, v) } // ScanX is like Scan, but panics if an error occurs. func (pcs *ParameterCatalogSelect) ScanX(ctx context.Context, v interface{}) { if err := pcs.Scan(ctx, v); err != nil { panic(err) } } // Strings returns list of strings from selector. It is only allowed when selecting one field. func (pcs *ParameterCatalogSelect) Strings(ctx context.Context) ([]string, error) { if len(pcs.fields) > 1 { return nil, errors.New("ent: ParameterCatalogSelect.Strings is not achievable when selecting more than 1 field") } var v []string if err := pcs.Scan(ctx, &v); err != nil { return nil, err } return v, nil } // StringsX is like Strings, but panics if an error occurs. func (pcs *ParameterCatalogSelect) StringsX(ctx context.Context) []string { v, err := pcs.Strings(ctx) if err != nil { panic(err) } return v } // String returns a single string from selector. It is only allowed when selecting one field. func (pcs *ParameterCatalogSelect) String(ctx context.Context) (_ string, err error) { var v []string if v, err = pcs.Strings(ctx); err != nil { return } switch len(v) { case 1: return v[0], nil case 0: err = &NotFoundError{parametercatalog.Label} default: err = fmt.Errorf("ent: ParameterCatalogSelect.Strings returned %d results when one was expected", len(v)) } return } // StringX is like String, but panics if an error occurs. func (pcs *ParameterCatalogSelect) StringX(ctx context.Context) string { v, err := pcs.String(ctx) if err != nil { panic(err) } return v } // Ints returns list of ints from selector. It is only allowed when selecting one field. func (pcs *ParameterCatalogSelect) Ints(ctx context.Context) ([]int, error) { if len(pcs.fields) > 1 { return nil, errors.New("ent: ParameterCatalogSelect.Ints is not achievable when selecting more than 1 field") } var v []int if err := pcs.Scan(ctx, &v); err != nil { return nil, err } return v, nil } // IntsX is like Ints, but panics if an error occurs. func (pcs *ParameterCatalogSelect) IntsX(ctx context.Context) []int { v, err := pcs.Ints(ctx) if err != nil { panic(err) } return v } // Int returns a single int from selector. It is only allowed when selecting one field. func (pcs *ParameterCatalogSelect) Int(ctx context.Context) (_ int, err error) { var v []int if v, err = pcs.Ints(ctx); err != nil { return } switch len(v) { case 1: return v[0], nil case 0: err = &NotFoundError{parametercatalog.Label} default: err = fmt.Errorf("ent: ParameterCatalogSelect.Ints returned %d results when one was expected", len(v)) } return } // IntX is like Int, but panics if an error occurs. func (pcs *ParameterCatalogSelect) IntX(ctx context.Context) int { v, err := pcs.Int(ctx) if err != nil { panic(err) } return v } // Float64s returns list of float64s from selector. It is only allowed when selecting one field. func (pcs *ParameterCatalogSelect) Float64s(ctx context.Context) ([]float64, error) { if len(pcs.fields) > 1 { return nil, errors.New("ent: ParameterCatalogSelect.Float64s is not achievable when selecting more than 1 field") } var v []float64 if err := pcs.Scan(ctx, &v); err != nil { return nil, err } return v, nil } // Float64sX is like Float64s, but panics if an error occurs. func (pcs *ParameterCatalogSelect) Float64sX(ctx context.Context) []float64 { v, err := pcs.Float64s(ctx) if err != nil { panic(err) } return v } // Float64 returns a single float64 from selector. 
It is only allowed when selecting one field. func (pcs *ParameterCatalogSelect) Float64(ctx context.Context) (_ float64, err error) { var v []float64 if v, err = pcs.Float64s(ctx); err != nil { return } switch len(v) { case 1: return v[0], nil case 0: err = &NotFoundError{parametercatalog.Label} default: err = fmt.Errorf("ent: ParameterCatalogSelect.Float64s returned %d results when one was expected", len(v)) } return } // Float64X is like Float64, but panics if an error occurs. func (pcs *ParameterCatalogSelect) Float64X(ctx context.Context) float64 { v, err := pcs.Float64(ctx) if err != nil { panic(err) } return v } // Bools returns list of bools from selector. It is only allowed when selecting one field. func (pcs *ParameterCatalogSelect) Bools(ctx context.Context) ([]bool, error) { if len(pcs.fields) > 1 { return nil, errors.New("ent: ParameterCatalogSelect.Bools is not achievable when selecting more than 1 field") } var v []bool if err := pcs.Scan(ctx, &v); err != nil { return nil, err } return v, nil } // BoolsX is like Bools, but panics if an error occurs. func (pcs *ParameterCatalogSelect) BoolsX(ctx context.Context) []bool { v, err := pcs.Bools(ctx) if err != nil { panic(err) } return v } // Bool returns a single bool from selector. It is only allowed when selecting one field. func (pcs *ParameterCatalogSelect) Bool(ctx context.Context) (_ bool, err error) { var v []bool if v, err = pcs.Bools(ctx); err != nil { return } switch len(v) { case 1: return v[0], nil case 0: err = &NotFoundError{parametercatalog.Label} default: err = fmt.Errorf("ent: ParameterCatalogSelect.Bools returned %d results when one was expected", len(v)) } return } // BoolX is like Bool, but panics if an error occurs. func (pcs *ParameterCatalogSelect) BoolX(ctx context.Context) bool { v, err := pcs.Bool(ctx) if err != nil { panic(err) } return v } func (pcs *ParameterCatalogSelect) sqlScan(ctx context.Context, v interface{}) error { for _, f := range pcs.fields { if !parametercatalog.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for selection", f)} } } rows := &sql.Rows{} query, args := pcs.sqlQuery().Query() if err := pcs.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } func (pcs *ParameterCatalogSelect) sqlQuery() sql.Querier { selector := pcs.sql selector.Select(selector.Columns(pcs.fields...)...) return selector }
For the 100th anniversary of the Model T, Ford thought it would rip off Jalopnik's patented top 10 list with one of its own. While there's no denying the importance of the Model T in creating the consumer/industrial complex, some of Ford's claims seem a bit rose-tinted. So let's take a look at ten ways Ford thinks the Model T changed the world, along with a little bit of third-party perspective.
/* Return which tree structure is used by NODE, or TS_D_GENERIC if NODE
   is one of the language-independent trees.  */

d_tree_node_structure_enum
d_tree_node_structure (lang_tree_node *t)
{
  switch (TREE_CODE (&t->generic))
    {
    case IDENTIFIER_NODE:
      return TS_D_IDENTIFIER;

    case FUNCFRAME_INFO:
      return TS_D_FRAMEINFO;

    default:
      return TS_D_GENERIC;
    }
}
# Program to count how many times the letter 'a' appears,
# the first position where it appears,
# and the last position where it appears.

palavra = input("Type a word: ").strip()

quantidade_letra = palavra.lower().count('a')
print(f"The letter 'A' appeared {quantidade_letra} time(s)\n")

if quantidade_letra > 0:
    # find/rfind return 0-based indexes; +1 turns them into 1-based positions
    primeira_vez = palavra.lower().find('a') + 1
    ultima_vez = palavra.lower().rfind('a') + 1
    print(f"It first appeared at position {primeira_vez}\n"
          f"It last appeared at position {ultima_vez}\n")
Ecology of the Heart: Understanding How People Experience Natural Environments Ecology is defined as the science that studies relationships between organisms and their environments. There are many different perspectives through which these relationships can be viewed. As a natural science, ecology focuses on understanding the physical, chemical and biological interactions that take place between organisms and environments. When the organisms in question include human beings, however, there are many other kinds of interactions that must be considered as well.
/* eslint-disable no-bitwise */
# Assumed imports for this training entry point; RoomDataModule, RoomEfficientNet
# and get_splits are project-local modules not shown here.
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint


def main(
    seed: int,
    batch_size: int,
    image_size: int,
    limit_data: float,
    num_classes: int,
    data_dir: str,
    efficientnet: str,
    epochs: int,
    use_gpu: bool,
    precision: int,
    patience: int,
) -> None:
    seed_everything(seed)
    get_splits(data_dir, limit_data)

    # Checkpoint on validation loss; the filename encodes epoch and metrics.
    model_checkpoint = ModelCheckpoint(
        monitor="val_loss",
        verbose=True,
        filename="{epoch}_{val_loss:.4f}_{val_acc:02f}",
    )
    early_stop_callback = EarlyStopping(
        monitor="val_loss", min_delta=0.00, patience=patience, verbose=True, mode="min"
    )

    dm = RoomDataModule(image_size=image_size, batch_size=batch_size)
    model = RoomEfficientNet(num_classes=num_classes, efficientnet=efficientnet)

    if use_gpu:
        gpus = -1  # use all available GPUs
    else:
        gpus = 0
        precision = 32  # mixed precision needs a GPU; fall back to full precision

    trainer = pl.Trainer(
        gpus=gpus,
        max_epochs=epochs,
        callbacks=[model_checkpoint, early_stop_callback],
        flush_logs_every_n_steps=1,
        log_every_n_steps=1,
        precision=precision,
    )
    trainer.fit(model=model, datamodule=dm)
    trainer.test(dataloaders=dm.test_dataloader())
import { Component, Host, Input } from '@angular/core';
import { NgForm } from '@angular/forms';

@Component({
  selector: 'app-publish-ad-button',
  templateUrl: './publish-ad-button.component.html',
  styleUrls: ['./publish-ad-button.component.scss'],
})
export class PublishAdButtonComponent {
  @Input() isEditRoute: boolean = false;

  constructor(@Host() readonly form: NgForm) {}
}
// TODO: implement response error
// Err writes err to w as a JSON error response. Values that are not
// *errors.RespError are first wrapped as internal server errors.
func Err(w http.ResponseWriter, err error) {
	_, ok := err.(*errors.RespError)
	if !ok {
		err = errors.InternalServerError(err.Error())
	}
	er, _ := err.(*errors.RespError)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(er.Code)
	res := Response{
		Message: &er.Message,
	}
	json.NewEncoder(w).Encode(res)
}
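A hedged usage sketch of how a handler might call Err; findUser is a hypothetical lookup, and `errors` is the project's own package providing RespError and InternalServerError, not the standard library:

func getUser(w http.ResponseWriter, r *http.Request) {
	user, err := findUser(r.URL.Query().Get("id")) // findUser is hypothetical
	if err != nil {
		// non-*errors.RespError values are wrapped as 500s inside Err
		Err(w, err)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(user)
}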
Review: Latin America: The Politics of Reform in Peru one might wish, he neglects in the end to relate it to Himmler's later and fiercer Nazi pattern. The question, for instance, of why a young conservative bourgeois would have moved into a minor bureaucratic position with the 'revolutionary' Nazi party by 1926 is left largely unanswered. The connection between young Himmler's sexual attitudes and later outbursts of sadism has been much more satisfactorily examined by Peter Loewenberg (American Historical Review 1971). And whether Himmler's early intellectual endeavours had a bearing on the scholastic dilettantism of the Reichsführer SS who established a private research foundation of his own is not even touched upon by Smith. It is hardly surprising, then, that Smith neglects to mention several important titles in his bibliography that deal, in part, with later aspects of Himmler's political and intellectual development, such as the in many ways parallel study by Josef Ackermann, the monographs by Hans Buchheim and Eugen Kogon, and the memoirs by Felix Kersten and Rudolf Hoess.
Random Copolymers Outperform Gradient and Block Copolymers in Stabilizing Organic Photovoltaics Recent advances have led to conjugated polymer-based photovoltaic devices with efficiencies rivaling amorphous silicon. Nevertheless, these devices become less efficient over time due to changes in active layer morphology, thereby hindering their commercialization. Copolymer additives are a promising approach toward stabilizing blend morphologies; however, little is known about the impact of copolymer sequence, composition, and concentration. Herein, the impact of these parameters is determined by synthesizing random, block, and gradient copolymers with a poly(3-hexylthiophene) (P3HT) backbone and side-chain fullerenes (phenyl-C61-butyric acid methyl ester, PC61BM). These copolymers are evaluated as compatibilizers in photovoltaic devices with P3HT:PC61BM as the active layer. The random copolymer with 20 mol% fullerene side chains, at 8 wt% concentration in the blend, gives the most stable morphologies. Devices containing the random copolymer also exhibit higher and more stable power conversion efficiencies than the control device. Combined, these studies point to the random copolymer as a promising new scaffold for stabilizing bulk heterojunction photovoltaics.
// Decompiled by Jad v1.5.8e2. Copyright 2001 <NAME>.
// Jad home page: http://kpdus.tripod.com/jad.html
// Decompiler options: packimports(3) fieldsfirst ansi space
// Source File Name: ExistEntity.java

package com.pa.sdk.entity;

public class ExistEntity {

    private boolean isExist;

    public ExistEntity() {
    }

    public boolean isExist() {
        return isExist;
    }

    public void setExist(boolean isExist) {
        this.isExist = isExist;
    }
}
from numpy import nan


def tofloat(x):
    """Convert x to a float, returning NaN when conversion fails."""
    try:
        return float(x)
    except (TypeError, ValueError):
        return nan
/*
Copyright (C) 2017 Verizon. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
	"os"
	"time"

	"github.com/channelmeter/vault-gatekeeper-mesos/gatekeeper"
	vaultapi "github.com/hashicorp/vault/api"
	"github.com/lavaorg/lrtx/config"
	"github.com/lavaorg/lrtx/mlog"
)

var (
	// config signature changed. the default value 2 is not passed anymore.
	CassandraDatacenter, _   = config.GetString("DATACENTER", "")
	CassandraProtoVersion, _ = config.GetInt("CASSANDRA_PROTO_VERSION", 3)
	CassandraCQLVersion, _   = config.GetString("CASSANDRA_CQL_VERSION", "")
	CassandraAuthEnabled, _  = config.GetBool("CASSANDRA_AUTH_ENABLED", false)
	VaultHostPort, _         = config.GetString("VAULT_HOST_PORT", "")
	GatekeeperHostPort, _    = config.GetString("GATEKEEPER_HOST_PORT", "")

	CassandraUsername, CassandraPassword = GetCassandraAuthCredentials(GatekeeperHostPort, VaultHostPort)
)

// GetCassandraAuthCredentials reads the Cassandra username and password from
// Vault, using a token obtained from gatekeeper. It returns empty credentials
// when Cassandra auth is disabled.
func GetCassandraAuthCredentials(gatekeeperHostPort string, vaultHostPort string) (username string, password string) {
	if !CassandraAuthEnabled {
		return username, password
	}
	vaultAddress := "http://" + vaultHostPort
	gatekeeperAddress := "http://" + gatekeeperHostPort
	vaultToken := vaultAuth(gatekeeperAddress, vaultAddress)
	for {
		client, err := vaultapi.NewClient(vaultapi.DefaultConfig())
		if err != nil {
			mlog.Alarm("Error initializing vault client: %s", err.Error())
			time.Sleep(1 * time.Second)
			continue
		}
		client.SetAddress(vaultAddress)
		client.SetToken(vaultToken)
		c := client.Logical()
		secret, err := c.Read("secret/cassandra")
		if err != nil {
			return username, password
		}
		username := secret.Data["username"].(string)
		password := secret.Data["password"].(string)
		return username, password
	}
}

func vaultAuth(gatekeeperAddress string, vaultAddress string) string {
	mlog.Debug("Authenticating with Vault")
	mesosTaskId := fetchAndCheckEnv("MESOS_TASK_ID", true)
	for {
		mlog.Info("Getting token from gatekeeper for mesosTaskId %s", mesosTaskId)
		client, err := gatekeeper.NewClient(vaultAddress, gatekeeperAddress, nil)
		if err != nil {
			mlog.Alarm("Error initializing gatekeeper client: %s", err.Error())
			time.Sleep(1 * time.Second)
			continue
		}
		token, err := client.RequestVaultToken(mesosTaskId)
		if err == nil {
			return token
		}
		mlog.Alarm("Couldn't get vault token: %s", err.Error())
		time.Sleep(1 * time.Second)
	}
}

func fetchAndCheckEnv(env string, mandatory bool) string {
	val := os.Getenv(env)
	if mandatory && len(val) <= 0 {
		mlog.Error("%s env variable is not set", env)
	}
	return val
}
Spirituality in geriatric psychiatry Purpose of review Academic recognition of the implications of religion/spirituality (R/S) for mental health is increasing, with a growing number of studies involving older adults. The present review provides an overview of these studies, highlighting the influence of R/S on older adults' mental health and the clinical implications of addressing R/S in the geriatrics and gerontology context. Recent findings The available evidence suggests that R/S involvement is usually associated with lower levels of depression, substance use/abuse, and cognitive decline, and with better quality of life, well-being, and functional status in older persons. Despite the number of studies showing this relationship, few have yet investigated the effects of addressing spiritual needs or carrying out R/S interventions in this age group. Summary Evidence is mounting that R/S is highly relevant to geriatric psychiatry. In general, studies have shown a positive relationship between R/S and mental health in the older population. Health professionals should be attentive to these spiritual needs. Nevertheless, more studies are needed to investigate the mechanisms of the R/S-mental health association and how to integrate R/S into clinical practice.
/**
 * Created by numb3rs on 4/29/18.
 */
public class NewsHolder extends AbstractFlexibleItem<NewsHolder.NewsViewHolder>
        implements IFilterable<String>, IHolder<News> {

    private News news;

    @Ignore
    int mFontSizeTitle = 13;
    @Ignore
    int mFontSizeDetails = 10;

    public NewsHolder(News news) {
        this.news = news;
    }

    @Override
    public boolean equals(Object o) {
        if (o instanceof NewsHolder) {
            NewsHolder newsHolder = (NewsHolder) o;
            return news.equals(newsHolder.getModel());
        }
        return false;
    }

    @Override
    public int hashCode() {
        return news.hashCode();
    }

    @Override
    public int getLayoutRes() {
        return R.layout.news_list_item;
    }

    @Override
    public NewsViewHolder createViewHolder(View view, FlexibleAdapter<IFlexible> adapter) {
        return new NewsViewHolder(view, adapter);
    }

    @Override
    public void bindViewHolder(FlexibleAdapter<IFlexible> adapter, NewsViewHolder holder,
                               int position, List<Object> payloads) {
        String getTheTitle = news.getTitle().getRendered();
        // Replace ASCII codes with proper characters
        String formatTitle = String.valueOf(Html.fromHtml(getTheTitle));
        holder.mTitle.setText(formatTitle);
        setImage(holder);
        setDateOn(holder);
        setWebsite(holder);
        holder.mTitle.setEnabled(true);
        holder.thumbnail.setEnabled(true);
    }

    private void setWebsite(NewsViewHolder holder) {
        String websiteName = news.getGuid().getRendered();
        try {
            URL url = new URL(websiteName);
            String host = url.getHost();
            String[] array = host.split("\\.");
            if (array[0].equals("www")) {
                holder.website.setText(array[1].toLowerCase());
            } else {
                holder.website.setText(array[0].toLowerCase());
            }
            holder.website.setTextSize(TypedValue.COMPLEX_UNIT_DIP, mFontSizeDetails);
        } catch (MalformedURLException e) {
            e.printStackTrace();
        }
    }

    private void setDateOn(NewsViewHolder holder) {
        try {
            String date = news.getDate();
            SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
            Date mDate = null;
            long timeInMilliseconds = 0;
            try {
                mDate = simpleDateFormat.parse(date);
                timeInMilliseconds = mDate.getTime();
            } catch (ParseException e) {
                e.printStackTrace();
            }
            holder.date.setReferenceTime(timeInMilliseconds);
            holder.date.setTextSize(TypedValue.COMPLEX_UNIT_DIP, mFontSizeDetails);
        } catch (NumberFormatException nfe) {
        }
    }

    private void setImage(NewsViewHolder holder) {
        Context context = holder.itemView.getContext();
        if (news.getEmbedded().getWpFeaturedmedia().get(0).getMediaDetails() != null) {
            if (news.getEmbedded().getWpFeaturedmedia().get(0)
                    .getMediaDetails().getSizes().getMediumLarge() != null) {
                String thumbnail_url = news.getEmbedded().getWpFeaturedmedia().get(0)
                        .getMediaDetails().getSizes().getMediumLarge().getSourceUrl();
                Glide.with(context)
                        .load(thumbnail_url)
//                        .placeholder(R.mipmap.placeholder)
                        .into(holder.thumbnail);
            } else {
                if (news.getEmbedded().getWpFeaturedmedia().get(0)
                        .getMediaDetails().getSizes().getMedium() != null) {
                    String thumbnail_url = news.getEmbedded().getWpFeaturedmedia().get(0)
                            .getMediaDetails().getSizes().getMedium().getSourceUrl();
                    Glide.with(context)
                            .load(thumbnail_url)
//                            .placeholder(R.mipmap.placeholder)
                            .into(holder.thumbnail);
                }
            }
        } else {
            Glide.with(context)
                    .load(R.mipmap.placeholder)
                    .into(holder.thumbnail);
        }
    }

    @Override
    public boolean filter(String constraint) {
        return news.getLink() != null && news.getLink().equals(constraint);
    }

    @Override
    public News getModel() {
        return news;
    }

    static class NewsViewHolder extends FlexibleViewHolder {

        public TextView mTitle;
        ImageView thumbnail;
        RelativeTimeTextView date;
        TextView website;

        public NewsViewHolder(View view, FlexibleAdapter adapter) {
            super(view, adapter);
            mTitle = view.findViewById(R.id.news_title);
            website = view.findViewById(R.id.news_site);
            thumbnail = view.findViewById(R.id.news_image);
            date = view.findViewById(R.id.news_date);
        }
    }

    public void setFontSizes(int mCurrentFontSize, int mFontSizeDetails) {
        this.mFontSizeTitle = mCurrentFontSize;
        this.mFontSizeDetails = mFontSizeDetails;
    }
}
import json
import logging
from datetime import date, datetime

from edge.opensearch.isoresponse import IsoResponse


class IsoResponseBySolr(IsoResponse):
    def __init__(self):
        super(IsoResponseBySolr, self).__init__()

    def generate(self, solrDatasetResponse, solrGranuleResponse=None, pretty=False):
        self._populate(solrDatasetResponse, solrGranuleResponse)
        return super(IsoResponseBySolr, self).generate(pretty)

    def _populate(self, solrDatasetResponse, solrGranuleResponse=None):
        if solrDatasetResponse is not None:
            solrJson = json.loads(solrDatasetResponse)
            logging.debug('dataset count: ' + str(len(solrJson['response']['docs'])))
            if len(solrJson['response']['docs']) == 1:
                # Populate template variables from the single dataset document.
                doc = solrJson['response']['docs'][0]
                self.variables['doc'] = doc

                # Format dates
                try:
                    self.variables['DatasetCitation_ReleaseDate'] = date.fromtimestamp(
                        float(doc['DatasetCitation-ReleaseDateLong'][0]) / 1000).strftime('%Y%m%d')
                    self.variables['DatasetCoverage_StartTime'] = self._convertTimeLongToISO(
                        doc['DatasetCoverage-StartTimeLong'][0])
                    self.variables['DatasetCoverage_StopTime'] = self._convertTimeLongToISO(
                        doc['DatasetCoverage-StopTimeLong'][0])
                except Exception:
                    pass

                try:
                    # Create list of unique dataset sensors
                    self.variables['UniqueDatasetSensor'] = {}
                    for i, x in enumerate(doc['DatasetSource-Sensor-ShortName']):
                        self.variables['UniqueDatasetSensor'][x] = i
                    self.variables['UniqueDatasetSensor'] = list(self.variables['UniqueDatasetSensor'].values())

                    # Create list of unique dataset sources
                    self.variables['UniqueDatasetSource'] = {}
                    for i, x in enumerate(doc['DatasetSource-Source-ShortName']):
                        self.variables['UniqueDatasetSource'][x] = i
                    self.variables['UniqueDatasetSource'] = list(self.variables['UniqueDatasetSource'].values())

                    # Replace all 'none'/'None' values with an empty string
                    doc['DatasetParameter-VariableDetail'] = [
                        self._filterString(variableDetail)
                        for variableDetail in doc['DatasetParameter-VariableDetail']]

                    # Current date
                    self.variables['DateStamp'] = datetime.utcnow().strftime('%Y%m%d')

                    # Data format version
                    self.variables['DatasetPolicy_DataFormat_Version'] = self._getDataFormatVersion(
                        doc['DatasetPolicy-DataFormat'][0])
                except Exception as e:
                    logging.debug('Problem generating ISO ' + str(e))
                    del self.variables['doc']

        if solrGranuleResponse is not None:
            solrGranuleJson = json.loads(solrGranuleResponse)
            logging.debug('granule count: ' + str(len(solrGranuleJson['response']['docs'])))
            for doc in solrGranuleJson['response']['docs']:
                self._populateItem(solrGranuleResponse, doc, None)
                doc['Granule-StartTimeLong'][0] = self._convertTimeLongToISO(doc['Granule-StartTimeLong'][0])
                doc['Granule-StopTimeLong'][0] = self._convertTimeLongToISO(doc['Granule-StopTimeLong'][0])
                doc['Granule-ArchiveTimeLong'][0] = self._convertTimeLongToISO(doc['Granule-ArchiveTimeLong'][0])
                doc['Granule-CreateTimeLong'][0] = self._convertTimeLongToISO(doc['Granule-CreateTimeLong'][0])
                # Create dictionary for the bounding box extent
                if ('GranuleSpatial-NorthLat' in doc and 'GranuleSpatial-EastLon' in doc
                        and 'GranuleSpatial-SouthLat' in doc and 'GranuleSpatial-WestLon' in doc):
                    self.variables['GranuleBoundingBox'] = dict([
                        ('southernmostLatitude', doc['GranuleSpatial-SouthLat'][0]),
                        ('northernmostLatitude', doc['GranuleSpatial-NorthLat'][0]),
                        ('westernmostLongitude', doc['GranuleSpatial-WestLon'][0]),
                        ('easternmostLongitude', doc['GranuleSpatial-EastLon'][0])])
                break
            self.variables['granules'] = solrGranuleJson['response']['docs']

    def _populateChannel(self, solrResponse):
        pass

    def _populateItem(self, solrResponse, doc, item):
        pass

    def _convertTimeLongToISO(self, time):
        isoTime = ''
        try:
            isoTime = datetime.utcfromtimestamp(float(time) / 1000).isoformat() + 'Z'
        except ValueError:
            pass
        return isoTime

    def _filterString(self, value):
        # Renamed parameter from 'str' to avoid shadowing the builtin.
        if value.lower() == 'none':
            return ''
        return value

    def _getDataFormatVersion(self, dataFormat):
        version = ''
        if dataFormat == 'NETCDF':
            version = 3
        elif dataFormat == 'HDF':
            version = 4
        else:
            try:
                version = int(dataFormat[-1])
            except Exception:
                pass
        return version
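# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the field names below mirror those read in
# _populate(), but the payload itself is a made-up minimal example, and the
# actual output depends on the IsoResponse template this class inherits.
# ---------------------------------------------------------------------------
# import json
#
# solr_payload = json.dumps({
#     'response': {
#         'docs': [{
#             'DatasetCitation-ReleaseDateLong': ['1325376000000'],
#             'DatasetCoverage-StartTimeLong': ['1325376000000'],
#             'DatasetCoverage-StopTimeLong': ['1328054400000'],
#             'DatasetSource-Sensor-ShortName': ['MODIS'],
#             'DatasetSource-Source-ShortName': ['AQUA'],
#             'DatasetParameter-VariableDetail': ['none'],
#             'DatasetPolicy-DataFormat': ['NETCDF'],
#         }]
#     }
# })
# print(IsoResponseBySolr().generate(solr_payload, pretty=True))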
Diaphragm dysfunction in chronic obstructive pulmonary disease.

RATIONALE Hypercapnic respiratory failure because of inspiratory muscle weakness is the most important cause of death in chronic obstructive pulmonary disease (COPD). However, the pathophysiology of failure of the diaphragm to generate force in COPD is in part unclear. OBJECTIVES The present study investigated contractile function and myosin heavy chain content of diaphragm muscle single fibers from patients with COPD. METHODS Skinned muscle fibers were isolated from muscle biopsies from the diaphragm of eight patients with mild to moderate COPD and five patients without COPD (mean FEV1 % predicted, 70 and 100%, respectively). Contractile function of single fibers was assessed, and afterwards, myosin heavy chain content was determined in these fibers. In diaphragm muscle homogenates, the level of ubiquitin-protein conjugation was determined. RESULTS Diaphragm muscle fibers from patients with COPD showed reduced force generation per cross-sectional area, and reduced myosin heavy chain content per half sarcomere. In addition, these fibers had decreased Ca2+ sensitivity of force generation, and slower cross-bridge cycling kinetics. Our observations were present in fibers expressing slow and 2A isoforms of myosin heavy chain. Ubiquitin-protein conjugation was increased in diaphragm muscle homogenates of patients with mild to moderate COPD. CONCLUSIONS Early in the development of COPD, diaphragm fiber contractile function is impaired. Our data suggest that enhanced diaphragm protein degradation through the ubiquitin-proteasome pathway plays a role in loss of contractile protein and, consequently, failure of the diaphragm to generate force.
import { Component, OnInit, Input } from '@angular/core';
import { forkJoin } from 'rxjs';
import { map, mergeMap } from 'rxjs/operators';

import { DiaryService } from './../../diary/diary.service';
import { ScaleEngineService } from './../scale-engine/scale-engine.service';

import * as _ from 'lodash';

const periodes = ['caped', 'longterm', 'midterm'];
const properties = ['deps', 'oeps', 'fcf'];

const assignMarginOfSafety = (report: any) => {
  const { price } = report || { price: null };

  if (_.isNumber(price) && report.dcfs) {
    _.forEach(periodes, (periode) => {
      const dcfs = _.get(report.dcfs, periode);

      _.forEach(properties, (property) => {
        const value = _.get(dcfs, property);
        if (value > 0) _.set(report, ['mos', periode, property], 1 - price / value);
      });
    });
  }
};

@Component({
  selector: 'sec-scale-ranking',
  templateUrl: './scale-ranking.component.html',
  styleUrls: ['./scale-ranking.component.scss']
})
export class ScaleRankingComponent implements OnInit {
  @Input() filename: string = 'buffet';

  public displayedColumns = [
    'ticker',
    'score',
    'avg',
    'change',
    'price',
    'measure.deps',
    'measure.oeps',
    'measure.fcf',
    'measure.mean',
    'estimate',
  ];

  public reports: any[] = [];
  public sortedReports: any[] = [];

  public entryType: string = 'mos';
  public periode: string = 'longterm';

  constructor(private diary: DiaryService, private engine: ScaleEngineService) { }

  ngOnInit() {
    this.getReports();
  }

  sortData(event: any) {
    console.log('event', event);

    if (event.direction === '') {
      this.sortedReports = this.reports;
    } else {
      const getValue = (item) => {
        const value = _.get(item, event.active);
        return value ? value : { desc: -1, asc: 1 }[event.direction] * 1000;
      };

      this.sortedReports = _.orderBy(
        this.reports,
        [getValue, 'score'],
        [event.direction, 'desc']
      );
    }
  }

  sortByEstimate() {
    // _.forEach(this.reports, (report) => {
    //   report.estimate = this.getEstimate(report);
    // });

    this.reports = _.orderBy(this.reports, ['estimate', 'score'], ['desc', 'desc']);
  }

  getReports() {
    const reports = this.engine.getTemplate(this.filename)
      .pipe(mergeMap(template => this.engine.createReports(template)));

    const prices = this.diary.getDays()
      .pipe(
        map(days => _.last(days)),
        mergeMap(day => this.diary.getSummary(day)),
        map((summary: any) => {
          return _
            .chain(_.get(summary, 'stocks'))
            .map(({ ticker, price }) => {
              return {
                ticker: _.last(ticker.split(':')),
                price,
              };
            })
            .filter(stock => _.isNumber(stock.price))
            .reduce((store, { ticker, price }) => {
              store[ticker] = price;
              return store;
            }, {})
            .value();
        })
      );

    forkJoin([prices, reports])
      .pipe(map(([prices, reports]) => {
        _.forEach(reports, (report) => {
          report.price = prices[report.ticker];
          assignMarginOfSafety(report);
        });

        return reports;
      }))
      .subscribe((reports: any) => {
        this.reports = reports;
        this.setFrames();
        console.log('this.reports', this.reports);
        this.sortedReports = _.cloneDeep(this.reports);
      });
  }

  setFrames() {
    _.forEach(this.reports, (report) => {
      report.estimate = this.getEstimate(report);
      report.measure = this.getMeasure(report);
      report.measure.mean = this.getMean(report);
    });
  }

  getMeasure(row): any {
    return _.get(row, [this.entryType, this.periode]) || {};
  }

  getMean(row): any {
    const measure = this.getMeasure(row);

    return _
      .chain(properties)
      .map(property => measure[property] || 0)
      // .filter()
      .mean()
      .value();
  }

  getEstimate(row): any {
    const mean = this.getMean(row);

    if (mean > 0) {
      return mean * row.score;
    }

    return 0;
  }
}
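// Usage sketch (illustrative; the selector and input come from the decorator
// above, but the event shape is an assumption matching what sortData() reads):
//
//   <sec-scale-ranking filename="buffet"></sec-scale-ranking>
//
// A table sort event wired to sortData() would then look like:
//
//   component.sortData({ active: 'measure.mean', direction: 'desc' });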
import * as Koa from 'koa'
import * as bodyParser from 'koa-bodyparser'
import router from './router'

const app = new Koa()

app.use(bodyParser())
app.use(router)
app.use((ctx: any) => {
  ctx.body = 'api server'
})

app.listen(2018)
console.log('Listening on 2018...')

export default app
TORONTO – “Put him on the list,” is how coach Greg Vanney put it post-game. Despite being forced to replace an injured Victor Vazquez at halftime of Wednesday night’s 2-1 loss to the Seattle Sounders, Vanney remains hesitant to blame injuries for TFC’s slow start. “We’ll deal with it,” he added. But even the best MLS rosters aren’t equipped to go through what the Reds are going through: injuries to more than half of their MLS Cup-winning lineup. Drew Moor (quadriceps) and Jozy Altidore (foot surgery) won’t be back any time soon. Chris Mavinga, Eriq Zavaleta, Nick Hagglund and Justin Morrow are said to be close to a return, but might not be risked this weekend in New England. Victor Vazquez (undisclosed) and Nico Hasler (quadriceps) could be massive losses in midfield if their injuries are at all serious. The Reds are playing at less than 50% early this season, falling further behind the Eastern Conference leaders. It’s not complicated, as I wrote in Thursday’s Sun. The longer it takes the Reds to get healthy, the deeper the hole they’re going to be in. Larson’s spread in Friday’s Sun. At some point, you have to wonder if TFC would be better served playing on turf than the bouncy, chunky, longish grass at BMO Field. “I still think guys are winding up to take shots and it’s taking a long time to sort of gauge if the ball is going to be rolling or if it’s going to bounce,” Vanney said. “I’m not making excuses, but in a game where time is of the essence because the numbers are really tight, you have to be really precise and things have to happen fast.” Vanney doesn’t like to criticize TFC’s groundsmen. It’s not necessarily their fault the pitch is in bad shape. Regardless, nobody at the club is happy with the field. The MLS Players’ Association released its bi-annual list of player salaries. Here are TFC’s 10 highest-paid players annually, according to the union: Sebastian Giovinco ($7.1 million), Michael Bradley ($6.5 million), Jozy Altidore ($5 million), Victor Vazquez ($1.5 million), Ager Aketxe ($1.3 million), Gregory van der Wiel ($835,000), Chris Mavinga ($563,333), Drew Moor ($350,000), Justin Morrow ($300,000) and Auro ($272,504). The Reds already have lost as many games in 2018 as they did all of last season. While they’re not in jeopardy of missing the playoffs, it’s extremely unlikely they’ll claim a top-two spot in the East. The numbers say TFC is left to chase a third- or fourth-place finish to avoid playing a first-round playoff match on the road. With his assist Wednesday night, Sebastian Giovinco (56 goals, 41 assists in 92 MLS games) inched closer to becoming the fastest player to reach 100 points … Canadian midfielder Mark-Anthony Kaye has been a wonderful addition to Los Angeles FC, starting nine games as a holding midfielder and chipping in a goal and an assist. Could Canada’s national team be lucky enough to replace Atiba Hutchinson with a like-for-like player? He was once a member of the TFC Academy … The Reds are hoping to get at least one defender back in time for Saturday’s match in New England. But should they risk anyone on the Revolution’s turf? … How much longer will Alex Bono be a TFC player? Belgian publication Voetbal Nieuws reports Club Brugge — Vazquez’s former club — is looking at making a bid for TFC’s 24-year-old No. 1. Doesn’t it feel a bit early for Bono to consider a European move — especially one that’s somewhat lateral? … Toronto FC’s defensive injuries are hurting it in more ways than one.
Yes, Bradley has been a solid stopgap to, at times, save the day. But the Reds are missing his ability to dictate the game in midfield … The Sounders forced everything wide this week. They knew Morrow wasn’t available to make a difference on the flanks and Altidore wasn’t present to get on the end of crosses … Things could get hairy if the Reds aren’t in a playoff spot by U.S. Independence Day. TFC plays five of six on the road in July – including stops in Kansas City, Orlando and Atlanta … Lastly, Video Replay isn’t the issue in MLS. The Video Assistant Referee is. I needed one press box replay Wednesday night to see that Seattle’s game-winner was offside. How did the VAR, who has more evidence at his disposal, miss that call? D.C. United is reportedly on the verge of signing former England international Wayne Rooney, who, surprisingly, is still just 32 years old. Sky Sports on Thursday reported Rooney’s $16-million move from Everton is done. Multiple reports later added the deal hasn’t been completed. While many lambasted news of the possible move, I’m open to it. Don’t forget, United is set to open a new venue this summer and needs someone to bring it notoriety. That has to be worth something. Plus, Rooney just wrapped another Premier League season in which he started 27 times and contributed 10 goals. He’s not Giovinco, but Rooney will help a D.C. side desperate for attention.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 andelf <<EMAIL>>
# See LICENSE for details.
# Time-stamp: <2011-11-01 17:44:15 wangshuyu>

from qqweibo.auth import OAuth1_0_Handler, OAuth2_0_Handler
from qqweibo.api import API
from qqweibo.parsers import (ModelParser, JSONParser, XMLRawParser,
                             XMLDomParser, XMLETreeParser)
from qqweibo.error import QWeiboError
from qqweibo.cache import MemoryCache, FileCache

OAuthHandler = OAuth1_0_Handler

__all__ = ['API', 'QWeiboError', 'version',
           'OAuth1_0_Handler', 'OAuth2_0_Handler', 'OAuthHandler',
           'XMLRawParser', 'XMLDomParser', 'XMLETreeParser',
           'ModelParser', 'JSONParser',
           'MemoryCache', 'FileCache']

version = '0.3.9'
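# Usage sketch (illustrative only; the handler and API call signatures are
# assumptions based on the exported names, not verified against this package):
#
#   from qqweibo import OAuthHandler, API, JSONParser
#
#   auth = OAuthHandler('YOUR_APP_KEY', 'YOUR_APP_SECRET')
#   # ... complete the OAuth flow and set the access token on the handler ...
#   api = API(auth, parser=JSONParser())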
Genome subtraction for novel target definition in Salmonella typhi

Large genomic sequencing projects of pathogens, together with the human genome project, have generated immense genomic and proteomic data that can be very beneficial for novel target identification in pathogens. The subtractive genomic approach is one of the most useful strategies for identifying potential targets. The approach works by subtracting the genes or proteins homologous between host and pathogen, leaving the set of genes or proteins that are essential for the pathogen and exclusively present in it. Here, the subtractive genomic approach is employed to identify novel targets in Salmonella typhi. The pathogen has 4718 proteins, of which 300 are found to be essential (indispensable to support cellular life) in the pathogen with no human homolog. Metabolic pathway analyses of these 300 essential proteins revealed that 149 proteins are exclusively involved in several metabolic pathways of S. typhi. Eight metabolic pathways are found to be present exclusively in the pathogen, comprising 27 enzymes unique to the pathogen. Thus, these 27 proteins may serve as prospective drug targets. Sub-cellular localization prediction of the 300 essential proteins reveals that 11 proteins lie on the outer membrane of the pathogen and could be probable vaccine candidates.

Background: The availability of the large amount of genomic data generated by the microbial genome and human genome projects has revolutionized the field of drug discovery against threatening human pathogens. These large sets of genomic data are useful in the identification and characterization of novel therapeutic targets and of the virulence factors prevalent in pathogens. The subtractive genomic strategy rests on the assumption that a novel target identified in the pathogen should be essential for the pathogen (that is, involved in replication and survival, and an important component of the various metabolic pathways and mechanisms operating in the pathogen), while at the same time being absent from the host, with no human homolog. A drug or lead compound designed against such a target should then act only against the mechanisms and functionality of the pathogen, not the host. Subtractive genomics has been successfully used by previous authors to locate novel drug targets in Pseudomonas aeruginosa, and the approach has been effectively complemented by the compilation of the Database of Essential Genes (DEG) for a number of pathogenic microorganisms. The current study makes use of the subtractive genomics approach and DEG to analyze the complete genome of Salmonella typhi in a search for potential drug targets and for vaccine candidates, which would likely lie on the surface membrane of the pathogen. Salmonella enterica serovar Typhi is a human-specific gram-negative pathogen causing enteric typhoid fever, a severe infection of the reticuloendothelial system. It has two strains, CT18 (multiple drug resistant) and Ty2, with a complete proteome of 4718 proteins. Worldwide, typhoid fever affects millions of people annually and causes a substantial number of deaths. Infection with S. typhi leads to the development of typhoid, or enteric, fever. This disease is characterized by the sudden onset of a sustained and systemic fever, severe headache, nausea, and loss of appetite. Other symptoms include constipation or diarrhea, enlargement of the spleen, possible development of meningitis, and/or general depression.
Untreated typhoid fever cases result in mortality rates ranging from 12–30%, while treated cases allow for 99% survival. The early administration of antibiotic treatment has proven to be highly effective in eliminating infections, but indiscriminate use of antibiotics has led to the emergence of multidrug-resistant strains of S. enterica serovar Typhi. Chloramphenicol was the drug of choice for the treatment of this infection until plasmid-mediated chloramphenicol resistance was encountered. Following this, ciprofloxacin became the mainstay of treatment, being a safer and more effective drug than chloramphenicol; but after clinical resistance to ciprofloxacin was observed in patients suffering from enteric fever, the remaining choices are expensive drugs such as ceftriaxone or cefixime. Resistance against ceftriaxone has been reported to the CDC (Centers for Disease Control and Prevention), and mild to moderate side effects have been shown for ceftriaxone. The novel targets identified here using subtractive genomics will help in understanding the biology of the pathogen and in providing more cost-effective medication.

Methodology: The systematic identification and characterization of potential targets in Salmonella typhi is illustrated in Figure 1.

Retrieval of proteomes of host and pathogen: The complete proteome of Salmonella typhi was retrieved from SwissProt, and protein sequences of Homo sapiens were downloaded from NCBI. The Database of Essential Genes was accessed at http://tubic.tju.edu.cn/deg/.

Identification of essential proteins in S. typhi: The S. typhi proteins were purged at 60% identity using CD-HIT to identify paralogs or duplicate proteins within the proteome of S. typhi. The paralogs were excluded, and the remaining set of proteins was subjected to BLASTP against the Homo sapiens protein sequences with an expectation value (E-value) cutoff of 10⁻⁴. The resulting dataset contained the proteins with no homologs in Homo sapiens. BLASTP analysis was then performed for these non-homologous protein sequences of S. typhi against DEG with an E-value cutoff of 10⁻¹⁰⁰. A minimum bitscore cutoff of 100 was used to screen for genes that appeared to represent essential genes. The protein sequences so obtained are the non-homologous essential proteins of S. typhi.

Metabolic pathway analysis: Metabolic pathway analysis of the essential proteins of S. typhi was done with the KAAS server at KEGG for the identification of potential targets. KAAS (KEGG Automatic Annotation Server) provides functional annotation of genes by BLAST comparisons against the manually curated KEGG GENES database. The result contains KO (KEGG Orthology) assignments and automatically generated KEGG pathways.

Sub-cellular localization prediction: Protein sub-cellular localization prediction is the computational prediction of where a protein resides in a cell. It is an important component of target identification, as it informs protein function and genome annotation. Sub-cellular localization analysis of the essential protein sequences was done with the Proteome Analyst Specialized Subcellular Localization Server v2.5 (PA-SUB) to identify surface membrane proteins that could be probable vaccine candidates.

Discussion: The results obtained through computational analysis reveal that, of the 4718 proteins in Salmonella typhi, 159 were identified as duplicates by CD-HIT at 60% similarity. The remaining 4559 proteins were subjected to the subtractive screen against the human proteome, which left 3570 proteins.
These 3570 proteins, when subjected to BLASTP against the DEG database, yielded 300 proteins that were essential for the pathogen. The results of the subtractive proteome approach, metabolic pathway analysis and sub-cellular localization are listed in Table 1 (Supplementary material). The purpose of the present study was to locate those essential proteins of S. typhi that play vital roles in the normal functioning of the bacterium within the host, and to single them out as candidate targets. Detection of the essential S. typhi proteins with no human homologs, with subsequent screening of the proteome for the resultant protein products, is likely to lead to the development of drugs that exclusively interact with the pathogen; those among them that are surface proteins would represent potential vaccine candidates. 300 of the essential proteins were without human homologs. Metabolic pathway analyses of these 300 essential proteins by the KAAS server at KEGG revealed that, of the 300, 149 proteins may be concluded to be unique and are invariably linked with essential metabolic and signal transduction pathways. Presumably, screening such novel targets for functional inhibitors will result in the discovery of novel therapeutic compounds active against bacteria, including the increasing number of antibiotic-resistant clinical strains. Metabolic pathway analyses of the 149 essential proteins revealed that 15 proteins are involved in carbohydrate metabolism, 10 in energy metabolism, 5 in lipid metabolism, 4 in nucleotide metabolism, 30 in amino acid metabolism, 20 in glycan biosynthesis and metabolism, 16 in metabolism of co-factors and vitamins, 20 in genetic information processing, 26 in environmental information processing and 2 in human disease. The results are summarized in Table 2 (Supplementary material). Comparative analysis of the metabolic pathways of the host (Homo sapiens) and the pathogen (S. typhi) using the Kyoto Encyclopedia of Genes and Genomes (KEGG) reveals 8 pathways that are unique to S. typhi. Thereafter, each selected pathway was screened for the unique enzymes and proteins involved. The peptidoglycan layer of the bacterial cell wall is the major structural element and plays an important role in pathogenesis, as it provides resistance to osmotic lysis. D-alanine is the central molecule in peptidoglycan assembly and cross-linking; D-alanine–D-alanine ligase (ddlA) is therefore an important target, as it is involved in D-alanine metabolism. Lipopolysaccharides (LPS) are also one of the main constituents of the outer cell wall of gram-negative bacteria and play an important role in the survival of the pathogen. Of the 14 enzymes involved in the LPS biosynthesis pathway, 13 were found to be essential for the viability of the bacterium, showed no homology with any human protein, and could be probable drug targets. Two-component systems represent the primary signal transduction paradigm in prokaryotic organisms; 8 essential enzymes were found to be potential targets in this pathway. Tryptophan synthase beta chain (trpB) is an important enzyme, as it is involved in the tyrosine and tryptophan biosynthesis pathway. The chemotaxis protein MotA and the chemotaxis protein methyltransferase CheR are essential enzymes due to their involvement in multiple pathways, including cell motility, bacterial chemotaxis and flagellar assembly.
Phosphoenolpyruvate carboxylase (ppc) has been identified as a possible target due to its involvement in carbon fixation in photosynthetic organisms, pyruvate metabolism and the reductive carboxylate cycle. The focus of the present study was to hunt for potential targets in S. typhi by a computational approach. The sub-cellular localization prediction done with PA-SUB identified 11 proteins lying on the surface of the pathogen, which could represent promising candidates for further characterization and analysis in support of vaccine design; the results are summarized in the supplementary material.

Conclusion: The availability of full genomic and proteomic sequences generated by the sequencing projects, along with computer-aided tools to identify and characterize probable drug targets, is a new and emerging trend in pharmacogenomics. The application of the Database of Essential Genes helps to identify potential drug targets in pathogens. The current study contributes to the characterization of potential protein targets for efficient drug design against Salmonella typhi. Because the subtractive genomic approach is applied for the identification of drug targets, a resulting drug should be specific for the pathogen and not lethal to the host. Molecular modeling of the targets will decipher the best possible active sites, which can then be targeted by simulations for drug design. Virtual screening against these potential targets might be useful in the discovery of potential therapeutic compounds against Salmonella typhi.
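As a minimal sketch, the subtraction pipeline described in the Methodology can be scripted roughly as follows (the cut-offs come from the text above, but the tool invocations, database names and file names are illustrative assumptions):

import subprocess

# 1. Collapse duplicates/paralogs within the S. typhi proteome at 60% identity.
subprocess.run(["cd-hit", "-i", "styphi.faa", "-o", "styphi_nr.faa", "-c", "0.60"], check=True)

# 2. BLASTP against the human proteome (E-value 1e-4); proteins with no hit
#    are kept (the filtering of the tabular output is omitted here).
subprocess.run(["blastp", "-query", "styphi_nr.faa", "-db", "human_db",
                "-evalue", "1e-4", "-outfmt", "6", "-out", "vs_human.tsv"], check=True)

# 3. BLASTP of the non-homologous set against DEG (E-value 1e-100, minimum
#    bitscore 100); the survivors are the candidate essential proteins.
subprocess.run(["blastp", "-query", "non_human_homologs.faa", "-db", "deg_db",
                "-evalue", "1e-100", "-outfmt", "6", "-out", "vs_deg.tsv"], check=True)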
import { Buffer } from "https://deno.land/[email protected]/node/buffer.ts";
import { Stream } from "https://deno.land/[email protected]/node/stream.ts";

export class ChunkStream extends Stream {
  private _buffers: (Buffer | null)[] | null;
  private _buffered: number;
  private _reads: any[] | null;
  private _paused: boolean;
  private _encoding: string;
  private _writable: boolean;

  public constructor() {
    super();

    this._buffers = [];
    this._buffered = 0;

    this._reads = [];
    this._paused = false;

    this._encoding = "utf8";
    this._writable = true;
  }

  public read(length: number, callback: (data: Buffer) => void) {
    this._reads!.push({
      length: Math.abs(length), // if length < 0 then at most this length
      allowLess: length < 0,
      func: callback,
    });

    queueMicrotask(() => {
      this._process();

      // it's paused and there is not enough data, so ask for more
      if (this._paused && this._reads && this._reads.length > 0) {
        this._paused = false;
        this.emit("drain");
      }
    });
  }

  public write(data: Buffer | string, encoding?: string) {
    if (!this._writable) {
      this.emit("error", new Error("Stream not writable"));
      return false;
    }

    let dataBuffer;
    if (data instanceof Buffer) {
      dataBuffer = data;
    } else {
      dataBuffer = Buffer.from(data, encoding || this._encoding);
    }

    this._buffers!.push(dataBuffer);
    this._buffered += dataBuffer.length;

    this._process();

    // ok if there are no more read requests
    if (this._reads && this._reads.length === 0) {
      this._paused = true;
    }

    return this._writable && !this._paused;
  }

  public end(data?: Buffer | string, encoding?: string) {
    if (data) {
      this.write(data, encoding);
    }

    this._writable = false;

    // already destroyed
    if (!this._buffers) {
      return;
    }

    // enqueue or handle end
    if (this._buffers.length === 0) {
      this._end();
    } else {
      this._buffers.push(null);
      this._process();
    }
  }

  public destroySoon(data?: Buffer | string, encoding?: string) {
    return this.end(data, encoding);
  }

  private _end() {
    if (this._reads!.length > 0) {
      this.emit("error", new Error("Unexpected end of input"));
    }
    this.destroy();
  }

  public destroy() {
    if (!this._buffers) {
      return;
    }

    this._writable = false;
    this._reads = null;
    this._buffers = null;

    this.emit("close");
  }

  private _processReadAllowingLess(read: any) {
    // ok there is some data, so we can satisfy this request
    this._reads!.shift(); // == read

    // first we need to peek into the first buffer
    const smallerBuf = this._buffers![0] as Buffer;

    // ok there is more data than we need
    if (smallerBuf && smallerBuf.length > read.length) {
      this._buffered -= read.length;
      this._buffers![0] = smallerBuf.slice(read.length);

      read.func.call(this, smallerBuf.slice(0, read.length));
    } else {
      // ok this is less than the maximum length, so use it all
      this._buffered -= smallerBuf.length;
      this._buffers!.shift(); // == smallerBuf

      read.func.call(this, smallerBuf);
    }
  }

  private _processRead(read: any) {
    this._reads!.shift(); // == read

    let pos = 0;
    let count = 0;
    const data = Buffer.allocUnsafe(read.length); // create buffer for all data

    while (pos < read.length) {
      const buf = this._buffers![count++] as Buffer;
      const len = Math.min(buf.length, read.length - pos);
      buf.copy(data, pos, 0, len);
      pos += len;

      // the last buffer wasn't fully used, so just slice it and leave
      if (len !== buf.length) {
        this._buffers![--count] = buf.slice(len);
      }
    }

    // remove all used buffers
    if (count > 0) {
      this._buffers!.splice(0, count);
    }

    this._buffered -= read.length;

    read.func.call(this, data);
  }

  private _process() {
    try {
      // as long as there is any data and read requests
      while (this._buffered > 0 && this._reads && this._reads.length > 0) {
        const read = this._reads[0];

        // read any data (but no more than length)
        if (read.allowLess) {
          this._processReadAllowingLess(read);
        } else if (this._buffered >= read.length) {
          // ok we can meet some expectations
          this._processRead(read);
        } else {
          // not enough data to satisfy the first request in the queue,
          // so we need to wait for more
          break;
        }
      }

      if (this._buffers && !this._writable) {
        this._end();
      }
    } catch (ex) {
      this.emit("error", ex);
    }
  }
}
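// Usage sketch (illustrative): write chunks in, then read fixed-length slices
// back out; with `allowLess` (a negative length) the callback may get fewer bytes.
//
//   const stream = new ChunkStream();
//   stream.write(Buffer.from("deadbeef", "hex"));
//   stream.read(4, (data: Buffer) => console.log(data.length)); // 4
//   stream.end();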
Microstructure of biopolymer-modified aerial lime mortars

Cellulose-based viscosity-modifying admixtures are used on a daily basis in a wide range of building materials, dominantly in pre-mixed mortar systems. Lately, alternative admixtures such as different gums have emerged. In building materials, the gums have viscosity-enhancing effects similar to cellulose ethers, with differing efficiency. The various routes of production, as well as the diverse working mechanisms of the biopolymers, are to be considered when choosing the most suitable admixture for the intended use. The influence of alternative admixtures, in several doses, on the microstructure of lime mortars was studied in this paper. Mortars were prepared with the same workability, and the air content in fresh mortar was determined, since it can have a notable impact on the microstructure of the hardened mortar. The hydroxypropyl derivative of chitosan showed an air-entraining ability, while the carboxymethyl derivative showed a slight decrease in the amount of air entrained into the mixture. In the case of diutan gum, the most water-demanding admixture, the percentage of entrained air dropped with growing dose. Admixture addition increased the volume of pores with diameters below 0.1 µm and in the region of pores around 10 µm. Only the diutan gum affected the distribution of capillary pores, which are typical for lime mortars. Hygric properties were in correlation with the air content values.

Introduction: Aerial lime as a binder has been used throughout the centuries across the whole complex building system, from the filling concrete in hollow masonry up to fine decorative plastering. Poor durability is the main disadvantage of lime-based materials, especially when used in exterior conditions, where continuous maintenance is required. Where the upkeep is omitted, or the complexity of the system is disrupted by incompatible materials, inevitable damage to the building occurs, causing irreversible loss of architectural and cultural heritage. The durability of the material is determined by a large number of factors, from the application and curing conditions up to the microstructure of the final product. According to A. Izaguirre et al., the durability and mechanical properties of lime mortars are closely linked with water absorption capacity, which is mostly affected by the porosity of the material. The porosity is affected by several different influences, mainly the amount of kneading water and the chemical and mineralogical composition of the aggregate. It is generally acknowledged that calcareous aggregate (mostly limestone) produces mortars with slightly better parameters in comparison with silicate aggregate; however, the theories explaining the mechanisms behind this are various, and even the effects on the porosity of mortars differ significantly between studies with seemingly similar parameters. The total porosity and the main pore diameter of lime paste increase with growing water content; however, the physico-mechanical properties of aerial lime are reported to be unaffected by the higher porosity, thus being non-compliant with the Powers equation generally accepted in the world of hydraulic binders. Even so, the amount of kneading water is still a factor affecting the strength of the lime mortar, but with lower sensitivity in the range of usual values in comparison with hydraulic binders.
The use of water-retentive admixtures, usually with a viscosity-modifying function, is widespread in the building materials industry, mainly in ready-mix products and self-consolidating concrete. These mostly cellulose-based admixtures ensure that a sufficient amount of water remains in the mixture throughout the curing time and is not unduly drawn out by the support material or by environmental conditions such as sun or wind. Apart from cellulose-based materials, admixtures based on biopolymers from other natural sources are being studied and used; these alternative biopolymeric admixtures were found more suitable for aerial-lime-based materials than the cellulose ethers. The study focuses on a direct comparison of the effects on the microstructure of aerial-lime-based mortars of four alternative biopolymeric admixtures: hydroxypropyl and carboxymethyl derivatives of chitosan (HPCH and CMCH, respectively) as representatives of artificially modified substances, diutan gum (DG) representing biopolymers of microbial origin, and the sodium salt of alginic acid (ALGNA) obtained from seaweed, similarly to agar or carrageenan. The use of most of these admixtures in lime mortars has previously been studied mainly at a constant water/binder ratio, which unfavourably affected the results through highly altered workability, especially at higher doses of the more efficient admixtures. This study aims to exclude this undesirable effect by preparing all the mortars with the same consistency according to European standards. This approach is also closer to the practical application of mortars, where workers are used to materials with similar behaviour regardless of their composition.

Materials and sample preparation: The mortars were prepared by introducing a dry mixture of lime hydrate of class CL 90 S according to EN 459-1 (Carmeuse Czech Republic), siliceous sand of 0–4 mm fraction, and the biopolymeric admixture (Table 1) in a dose of 0.1%, 0.5%, or 1% of binder weight into the specified amount of water (Table 1), to ensure a workability of 160±5 mm as determined by the flow table test according to EN 1015-3. The mixtures were cast into prismatic moulds of 40×40×160 mm and demoulded after 72 hours. The samples were cured under laboratory conditions (20 °C, 55% RH) until the tests, which were performed at the age of 90 days.

Methods: The porosity of the samples was determined by mercury intrusion porosimetry using a Micromeritics Pore Sizer 9310. From the data gathered, the total porosity of the specimens and the pore size distribution were obtained. The hygric properties of the prepared mortars were characterized by the water absorption coefficient due to capillary action according to EN 1015-18. Chosen specimens were subjected to scanning electron microscopy (SEM).

Pore size distribution: The porosity of the specimens is visualised as cumulative pore volume curves (full lines) and differential pore volume (pore volume density; dotted/dashed lines) in Fig. 1 and Fig. 2, with cut-outs focusing on the range of large pores from 1 to 100 µm (for better visibility, the differential pore volume has an inverted y-axis in the cut-out). The shape of the cumulative curves is typical for lime-based materials, with the main volume of pores situated in the range between 0.1 and 1 µm.
In this interval the shape of the curve remains similar for the chitosan derivatives as well as for the two lower doses of sodium alginate, suggesting that the biopolymer addition compensates for the influence of the higher water content, while in the case of diutan gum the growing dosage (and water content) leads to a flatter curve, and thus a wider interval of typical pore sizes. The onset of the DG curves moves significantly towards larger pore diameters, as observed by M. Arandigoyen et al. for mortars with growing water content, although the shift is not as pronounced as would be expected from those results, indicating that even in this case the biopolymer addition has a positive impact by compensating for a much higher water/lime ratio. Generally, the addition of biopolymer slightly reduces the volume of pores in this area: ALGNA and CMCH show a linear dosage dependency (the higher the dosage, the higher the pore volume reduction), whereas HPCH and DG showed the highest reduction at the dosage of 0.5%. This is contrary to the results observed by A. Izaguirre et al. for lime mortars with guar gum addition, where the interval became narrower and the volume was slightly promoted. The effect of CMCH addition observed by M. Lasheras-Zubiate on cementitious materials is not present in the case of lime-based mortars. The second largest volume increment is in the area of large pores with diameters around 10 µm. These pores are formed during the mixing period and indicate the different rheology of the mortars despite the similar workability defined by the flow table test. The most significant difference in behaviour is between ALGNA, DG and the chitosan derivatives: the air-entraining function of HPCH (Table 1) leads to a notable increase in the volume of these pores without affecting their size distribution, while the other chitosan derivative slightly reduces the amount of large pores. ALGNA and DG have no proven air-entraining function in lime mortars; thus the increase in volume is caused only by the different behaviour of the mortar during mixing. By these means, ALGNA in the highest dosage and DG in the 0.5% dose not only promote the volume of large pores, but also support the evolution of pores with larger diameters. The total porosity (Fig. 3) is affected by the different volumes of large pores; however, it is mildly increased for all admixtures and doses, even those which reduce the volume of larger pores. For the admixtures which do not help to develop large-diameter pores, the cause of the increase can be seen in Fig. 1, where the slope of the cumulative pore volume curve differs between the reference mortar and the admixtured ones for pores smaller than 0.1 µm. Within this interval, the shape of the curves of all mortars with admixtures differs from that of the reference mortar, meaning that the admixtures gently promote the evolution of the small pores. This increase can also be seen in the charts of A. Izaguirre et al. for guar gum and its hydroxypropyl derivative. The hygric properties of the mortars, described by the capillary water coefficient and the apparent moisture diffusivity, are shown in Table 2. The capillary water coefficient, which is the gradient of the linear part of the absorption curve, is lower for the mortars with HPCH, the lowest dose of CMCH, and the highest dose of ALGNA, meaning that these admixtures in these doses hinder the moisture transport in the mortars, which results in their higher durability.
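For reference, the capillary water absorption coefficient reported here is, in generic form, the slope of the early-time water uptake curve plotted against the square root of time, A_w = (m_t − m_0)/(A·√t), where m_t is the specimen mass at time t, m_0 the initial mass and A the suction area; this is the generic formulation only, as EN 1015-18 prescribes the exact specimen conditioning and reading times.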
The capillary water absorption coefficient reduction is caused by the interruption of capillaries by air voids; thus it was mainly the mortars with high air content that performed well. The exception is ALGNA 1, where the air content in the fresh mortar is not notably higher than in the reference mortar but which, taking into account the pore size distribution in Fig. 2, contains a higher volume of large pores in the hardened state. The differing behaviour of the DG 0.5 and DG 1.0 mortars is probably also caused by their diverse pore size distributions.

Scanning electron microscopy: In most of the mortar samples studied by SEM, there are no signs of the biopolymers influencing the microstructure. As seen in Fig. 4a, the binder's microstructure in all cases consists of portlandite and calcite crystals with capillary pores between them, complemented by a small amount of ettringite (rod-like crystals in the leftmost part of Fig. 4a); the ettringite is formed due to impurities in the kneading water and sand. Only in the case of DG addition (Fig. 4b) is there a noticeable change in the appearance of the binder phase, caused by web-like structures connecting the crystal clusters. These structures are found only in DG-modified mortars and are therefore a direct consequence of the DG addition to the lime material; they may also be responsible for the altered pore size distribution (Fig. 2) of the DG 1.0 mortar.

Conclusions: The influence of four biopolymeric admixtures (hydroxypropyl- and carboxymethyl-chitosan, diutan gum, and sodium alginate) on the microstructure of aerial-lime-based mortars of constant consistency was studied. Hydroxypropyl chitosan was the only one of the studied admixtures with an air-entraining function. This promoted the occurrence of pores with diameters around 10 µm and an increase in total porosity. The air-entraining function is beneficial for the reduction of the capillary water coefficient, and thus for the improvement of durability. Carboxymethyl chitosan in the lowest dose of 0.1% also decreased the capillary water coefficient, but with no significant alteration of the pore size distribution. Diutan gum, the most water-demanding admixture, increased the total porosity of the mortars; in the dose of 0.5% it promoted the evolution of large pores around 10 µm, and in the dose of 1.0% it strongly altered the pore size distribution of the mortars. Due to the low air content of these mortars in combination with their high total porosity, the samples showed the fastest moisture transport, impairing the durability. Diutan gum was the only admixture whose addition led to modifications of the structure observable by scanning electron microscopy. Sodium alginate in the lower doses had no significant impact on the porosity of the mortar, and the hygric properties remained similar to the reference mortar; in the highest dose, however, an increase in the volume of larger pores took place, and the moisture transport in the mortar was distinctly slowed. The hydroxypropyl chitosan addition was found to be the most useful for hindering moisture transport, and thus improving the durability of the prepared mortars, but the use of sodium alginate should also be considered. The mixtures containing these two admixtures are to be subjected to durability tests to further evaluate their impact on the properties of lime mortars. Carboxymethyl chitosan and diutan gum showed no effect or a negative effect, respectively, on the presumed durability of the materials, and were not found beneficial for the mortars, at least in the studied doses.
The work has been financially supported by the BUT specific research project no. FAST-J-20-6289.
Individual differences in the representation of sentences in memory. A cued recall procedure was used to assess the nature of the memory representation that underlies the ability of mentally retarded and nonretarded individuals to remember single sentences. Mentally retarded, equal-CA, and equal-MA subjects listened to a list of sentences after which their ability to recall the object noun of the sentence was assessed when they were provided recall cues that contained (a) only the subject noun of the original sentence, (b) only the verb of the sentence, or (c) both the subject and verb. As expected, performance for all groups was best when they were provided the subject plus verb cue relative to the single word cues. In addition, the groups differed in the magnitude of this two-word cue advantage, with the retarded subjects exhibiting the smallest and the equal-CA subjects exhibiting the largest advantage. This finding reflects a difference in the degree to which mentally retarded and nonretarded individuals construct sentence representations that more precisely specify the meaning of the sentence through the integration of its constituents.
package com.paypal.android.sdk.payments;

import android.app.Activity;
import android.os.Bundle;
import com.paypal.android.sdk.C11756Pb;
import com.paypal.android.sdk.C11761Rb;

public final class FuturePaymentInfoActivity extends Activity {

    /* renamed from: a */
    private C11942j f37307a;

    /* access modifiers changed from: protected */
    public final void onCreate(Bundle bundle) {
        super.onCreate(bundle);
        C11939i iVar = (C11939i) getIntent().getExtras().getSerializable("com.paypal.details.scope");
        setTheme(16973934);
        requestWindowFeature(8);
        this.f37307a = new C11942j(this, iVar);
        setContentView(this.f37307a.f37564a);
        C11867Da.m39239a((Activity) this, this.f37307a.f37565b, (C11761Rb) null);
        this.f37307a.f37569f.setText(C11756Pb.m38949a(C11761Rb.BACK_BUTTON));
        this.f37307a.f37569f.setOnClickListener(new C11936h(this));
    }
}
/*
  +----------------------------------------------------------------------+
  | Collection Extension                                                 |
  +----------------------------------------------------------------------+
  | Copyright (c) 2016-2018 The Viest                                    |
  +----------------------------------------------------------------------+
  | http://www.viest.me                                                  |
  +----------------------------------------------------------------------+
  | Author: viest <<EMAIL>>                                              |
  +----------------------------------------------------------------------+
*/

#ifndef PHP_COLLECTION_H
#define PHP_COLLECTION_H

extern zend_module_entry collection_module_entry;
#define phpext_collection_ptr &collection_module_entry

#define PHP_COLLECTION_VERSION "0.1.0"

#ifdef PHP_WIN32
#   define PHP_COLLECTION_API __declspec(dllexport)
#elif defined(__GNUC__) && __GNUC__ >= 4
#   define PHP_COLLECTION_API __attribute__ ((visibility("default")))
#else
#   define PHP_COLLECTION_API
#endif

#ifdef ZTS
#include "TSRM.h"
#endif

#define COLLECTION_STARTUP_MODULE(module) ZEND_MODULE_STARTUP_N(vtiful_collection_##module)(INIT_FUNC_ARGS_PASSTHRU)
#define COLLECTION_STARTUP_FUNCTION(module) ZEND_MINIT_FUNCTION(vtiful_collection_##module)

#if defined(ZTS) && defined(COMPILE_DL_COLLECTION)
ZEND_TSRMLS_CACHE_EXTERN()
#endif

PHP_MINIT_FUNCTION(collection);
PHP_MSHUTDOWN_FUNCTION(collection);
PHP_RINIT_FUNCTION(collection);
PHP_RSHUTDOWN_FUNCTION(collection);
PHP_MINFO_FUNCTION(collection);

#endif

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: noet sw=4 ts=4 fdm=marker
 * vim<600: noet sw=4 ts=4
 */
import { BufferCursor } from "./buffercursor";

export function bytesToString(source: Buffer | BufferCursor): string {
  const buf = source instanceof BufferCursor ? source.buffer : source;
  const bytes = [];

  for (const value of buf) {
    bytes.push((value >= 33 && value <= 126) ? value : 46);
  }

  return Buffer.from(bytes).toString("ascii");
}

export function splitInGroups(buf: BufferCursor) {
  const groups = [];
  let partLen = 0;

  do {
    partLen = buf.readUInt32LE();
    if (partLen !== 0) {
      groups.push(buf.slice(partLen - 4));
    }
  } while (buf.tell() < buf.length);

  return groups;
}

export const prefixJoin = (buffers: Buffer[]) =>
  Buffer.concat(buffers.map(v => {
    const buf = Buffer.allocUnsafe(4);
    buf.writeUInt32LE(v.byteLength + 4);
    return Buffer.concat([buf, v]);
  }));
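// Round-trip sketch (illustrative; assumes BufferCursor wraps a Buffer, as the
// import above suggests):
//
//   const joined = prefixJoin([Buffer.from("ab"), Buffer.from("cdef")]);
//   const parts = splitInGroups(new BufferCursor(joined));
//   // parts[0] -> <Buffer 61 62>, parts[1] -> <Buffer 63 64 65 66>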
Planktonic food web structure and potential carbon flow in the Lower River Rhine with a focus on the role of protozoans

Studies during the last two decades have revealed the importance of planktonic microorganisms in the aquatic matter flux. However, studies on planktonic food web structure have mostly concentrated on lentic waters. In the present study, the biomass partitioning of the major plankton groups (bacteria, algae, heterotrophic flagellates (HF), ciliates and metazoans) in a large river (Lower River Rhine) was analysed over a complete annual cycle, and calculations of the potential carbon flow within the planktonic food web were conducted for the four seasons. The plankton biomass was dominated by phytoplankton, followed by bacterioplankton. The zooplankton was dominated by HF, which contributed more than 65% of the total zooplankton biomass in all seasons. A significant part of the HF biomass was contributed by large flagellates (>20 µm). In accordance with the dominance of the HF and their high growth rates, this group contributed the largest part of the planktonic matter turnover within the zooplankton. The calculations suggest that the HF were able to consume between 11 and 65% of the seasonal mean bacterial production, and that the HF consumed a larger amount of phytoplankton than both ciliates and metazoans. According to these calculations, only a small part of the total potential production of the phytoplankton, as well as that of the total zooplankton, was consumed by planktonic predators. Since the plankton abundance did not generally increase during the downstream passage, the present data suggest that a large part of the plankton production is lost by other means. The role of benthic predators is discussed in this context. Copyright © 2005 John Wiley & Sons, Ltd.
# A. Buy the String
t = int(input())
for _ in range(t):
    n, c0, c1, h = map(int, input().split())
    s = input()
    x = s.count("0")
    tempy = s.count("1")
    a = []
    i = 0
    # Keep x of the zeros and flip the rest (i flips, each costing h).
    while x > -1:
        c = x * c0 + (n - x) * c1 + i * h
        a.append(c)
        x -= 1
        i += 1
    k = 0
    # Symmetrically, keep tempy of the ones and flip the rest.
    while tempy > -1:
        d = tempy * c1 + (n - tempy) * c0 + k * h
        a.append(d)
        tempy -= 1
        k += 1
    print(min(a))
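# Equivalent closed form (a sketch; reads the same input format): each
# character is bought either at its own price or flipped first for h, so the
# optimum is a per-character minimum and the enumeration above is unnecessary.
#
# t = int(input())
# for _ in range(t):
#     n, c0, c1, h = map(int, input().split())
#     s = input()
#     print(sum(min(c0, h + c1) if ch == "0" else min(c1, h + c0) for ch in s))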
#ifndef GAME_CONFIG_GAMECFG_H_
#define GAME_CONFIG_GAMECFG_H_

//C system headers

//C++ system headers
#include <cstdint>
#include <unordered_map>

//Other libraries headers

//Own components headers
#include "game/panels/config/PiecePromotionPanelCfg.h"

struct GameCfg {
  PiecePromotionPanelCfg piecePromotionPanelCfg;

  int32_t chessBoardRsrcId;
  int32_t whitePiecesRsrcId;
  int32_t blackPiecesRsrcId;
  int32_t targetRsrcId;
  int32_t blinkTargetId;
  int32_t fpsId;
  int32_t unfinishedPieceFontId;
  int32_t moveTailId;
  int32_t textId;
};

#endif /* GAME_CONFIG_GAMECFG_H_ */
A Primer on NoSQL Databases for Enterprise Architects: The CAP Theorem and Transparent Data Access with MongoDB and Cassandra MongoDB and Apache Cassandra are the dominant Not Only SQL (NoSQL) database management systems for persisting structured records. Moreover, the pair are respectively in the top-five and top-ten of database management systems generally. Therefore this work seeks to present the two leading systems, along with the underlying principle of the CAP Theorem, in the context of creating transparent data access tiers capable of supporting flexible enterprise architectures.
import ArticleItem from './article-item'

export { ArticleItem }
A city’s attempt to force churches and other religious groups to align with its gender-identity and sexual-orientation agenda has been slapped down by a judge. The city claimed that any time a church is open to the public, outside the role of a house of worship, it is subject to the ordinance. On Friday, however, Brown County Judge William Atkinson granted a summary judgment to the churches and denied the city’s request to dismiss the suit. The ordinance demanded “non-discrimination” on the basis of gender identity and sexual orientation in housing, employment, advertising and public accommodations. There was no exemption for religious groups. “These religious organizations stood up for their convictions and fought back against intolerant and aggressive LGBT policies,” Dacus said. “These pastors, who preach from their pulpits on marriage and sexual ethics, hold their ministries to those biblical standards in areas of community outreach, employment, and advertising,” PJI said. But the judge ruled that, effective immediately, religious institutions are exempt from the ordinance. “This court victory came just in time for these churches who have treaded cautiously under the burden of this government orthodoxy,” said PJI attorney Kevin Snider. The case filed was on behalf of Hope Lutheran, Crosspoint, Destiny, St. Mark Lutheran and Christ the Rock churches, as well as a local radio station. “The city of De Pere refused to provide any such assurances, leaving the religious institutions vulnerable absent court action,” the organization said.
Balanced Bodies Osteopathy is dedicated to providing each and every individual with the most thorough musculoskeletal care possible. Located right next to Fire in the belly in Dawson St, Lismore, Balanced Bodies Osteopathy was established in 2014 by Dr Leah Dwyer. Leah trained at Southern Cross University for 5 years, completing her Bachelor's degree in 2011 and then going on to complete her Master's degree in 2013, majoring in Osteopathic Medicine and human structure and function. In her final year, Leah researched the therapeutic effect and its overall contribution to osteopathic treatment. Leah also has a keen interest in treating women enduring the changes that take place during and after pregnancy and childbirth, as well as women's health, sporting injuries, the effects of aging and chronic pain in general. Here at Balanced Bodies Osteopathy, we are dedicated to providing the best service to help you reach your overall health goals. Make an appointment today.
import numpy as np


def counterfactual(x, options, delta_dims, sf=3):
    if type(options) == tuple:
        options = [options]
    delta_dims = options[0][0].space.idxify(delta_dims)
    dim_names, operators, options_text = options[0][0].space.dim_names, [None, ">=", "<"], []
    for leaf, x_closest, l0, l2 in options:
        terms = []
        for d, diff in enumerate(np.sign(x_closest - x)):
            if d in delta_dims and diff:
                terms.append(f"{dim_names[d]} {operators[int(diff)]} {round_sf(x_closest[d], sf)}")
        if terms:
            options_text.append(" and ".join(terms) + f" (l0 = {int(l0)}, weighted l2 = {round_sf(l2, sf)})")
        else:
            options_text.append("Foil already satisfied.")
    return "\nor\n".join(options_text)
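# `round_sf` is referenced above but not defined in this snippet; a minimal
# sketch of a significant-figures rounder with the assumed signature:
from math import floor, log10


def round_sf(v, sf=3):
    # Round v to sf significant figures; 0 is returned unchanged.
    if v == 0:
        return 0
    return round(v, sf - 1 - floor(log10(abs(v))))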
""" Sets up the terminal color scheme. """ import functools import os import sys from django.utils import termcolors def supports_color(): """ Return True if the running system's terminal supports color, and False otherwise. """ plat = sys.platform supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ) # isatty is not always implemented, #6223. is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() if not supported_platform or not is_a_tty: return False return True class Style: pass def make_style(config_string=''): """ Create a Style object from the given config_string. If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used. """ style = Style() color_settings = termcolors.parse_color_setting(config_string) # The nocolor palette has all available roles. # Use that palette as the basis for populating # the palette as defined in the environment. for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]: if color_settings: format = color_settings.get(role, {}) style_func = termcolors.make_style(**format) else: def style_func(x): return x setattr(style, role, style_func) # For backwards compatibility, # set style for ERROR_OUTPUT == ERROR style.ERROR_OUTPUT = style.ERROR return style @functools.lru_cache(maxsize=None) def no_style(): """ Return a Style object with no color scheme. """ return make_style('nocolor') def color_style(): """ Return a Style object from the Django color scheme. """ if not supports_color(): return no_style() return make_style(os.environ.get('DJANGO_COLORS', ''))
Radial probe endobronchial ultrasound for the diagnosis of peripheral lung cancer: systematic review and meta-analysis

Improved diagnostic sensitivity of bronchoscopy for the investigation of peripheral pulmonary lesions (PPLs) with the use of radial probe endobronchial ultrasound (EBUS) has been reported, although diagnostic performance varies considerably. A systematic review of published literature evaluating radial probe EBUS accuracy was performed to determine point sensitivity and specificity, and to construct a summary receiver-operating characteristic curve. Sub-group analysis and linear regression were used to identify possible sources of study heterogeneity. 16 studies with 1,420 patients fulfilled the inclusion criteria. Significant inter-study variation in EBUS method was noted. EBUS had a point specificity of 1.00 (95% CI 0.99–1.00) and a point sensitivity of 0.73 (95% CI 0.70–0.76) for the detection of lung cancer, with a positive likelihood ratio of 26.84 (12.60–57.20) and a negative likelihood ratio of 0.28 (0.23–0.36). Significant inter-study heterogeneity for sensitivity was observed, with the prevalence of malignancy, lesion size and the reference standard used being possible sources. EBUS is a safe and relatively accurate tool in the investigation of PPLs. The diagnostic sensitivity of EBUS may be influenced by the prevalence of malignancy in the patient cohort being examined and by lesion size. Further methodologically rigorous studies on well-defined patient populations are required to evaluate the generalisability of our results.
import torch
from torch import nn, optim
from torch.nn.modules import loss
from torch.optim import Adam
from torch.utils.data import DataLoader

from data import MatrixDataset, ToTorchDataset
from root import absolute
from utils.evaluation import mae, mse, rmse

# Freeze random seeds
from utils.model_util import freeze_random

# Logging
from utils.mylogger import TNLog

from .model import MLPModel

"""
RESULT MLP:
Density:0.05,type:rt,mae:0.4674951136112213,mse:1.8543723821640015,rmse:1.3617534637451172
"""

freeze_random()  # freeze random seeds to ensure reproducible results

logger = TNLog('MLP')
logger.initial_logger()

for density in [0.05, 0.10, 0.15, 0.20]:
    type_ = "rt"
    rt_data = MatrixDataset(type_)
    train_data, test_data = rt_data.split_train_test(density)

    train_dataset = ToTorchDataset(train_data)
    test_dataset = ToTorchDataset(test_data)
    train_dataloader = DataLoader(train_dataset, batch_size=64)
    test_dataloader = DataLoader(test_dataset, batch_size=64)

    lr = 0.01
    epochs = 100
    loss_fn = nn.L1Loss()
    dim = 12

    mlp = MLPModel(loss_fn, rt_data.row_n, rt_data.col_n, dim=dim)
    opt = Adam(mlp.parameters(), lr=lr)

    mlp.fit(train_dataloader, epochs, opt, eval_loader=test_dataloader,
            save_filename=f"Density-{density}")


def predict():
    y, y_pred = mlp.predict(
        test_dataloader, True,
        "/Users/wenzhuo/Desktop/研究生/科研/QoS预测实验代码/SCDM/output/FedMLPModel/loss_0.4504.ckpt")
    mae_ = mae(y, y_pred)
    mse_ = mse(y, y_pred)
    rmse_ = rmse(y, y_pred)
    logger.info(f"Density:{density},type:{type_},mae:{mae_},mse:{mse_},rmse:{rmse_}")
Victoria’s new technology minister has confirmed the state’s fledgling Coalition Government does not support the introduction of Labor’s preferred ‘opt-out’ model for the National Broadband Network (NBN). In early December, the state’s new Premier Ted Baillieu gave a strong indication he was against the opt-out model, saying it should be “optional” for premises to receive fibre. However, it has remained unclear if Baillieu’s off-the-cuff comments represented Coalition policy — until now. “The position of the government is that it does not support the introduction of ‘opt-out’ policy,” the state’s new technology minister, Gordon Rich-Phillips, said. He didn’t elaborate further on the Coalition’s reasoning for the policy decision. However, he noted the State Government was in ongoing discussions with its Federal counterpart in relation to the NBN. “Current matters being discussed are around establishing the nerve centre and NBN Co’s network operations centre in Melbourne,” Rich-Phillips said. The decision by the Victorian Government to block the NBN opt-out model will place the state at loggerheads with Federal Communications Minister Stephen Conroy, who has expressed a strong preference for the opt-out model. In addition, it displays division within the Coalition itself about the issue. In Tasmania, the Labor State Government has legislated to support an opt-out model at the urging of the Opposition, with the state attempting to make the most of the technology rollout. Most of the other state governments are yet to take a formal policy decision on the matter; and NSW, for one, is currently in a state of flux as it prepares for an expected change of Government to the Coalition early in 2011. Victoria’s choice will also likely force NBN Co into conducting a higher degree of education campaigns in the state, as it attempts to convince residents to opt-in to the fibre rollout. It has already started conducting such efforts in early stage rollout areas throughout mainland Australia.
Organization of care and diagnosed depression among women veterans. OBJECTIVE To analyze the association between the organizational features of integration of physical and mental healthcare in women's health clinics and the diagnosis of depression among women veterans with or at risk for cardiovascular conditions (ie, diabetes mellitus, heart disease, or hypertension). STUDY DESIGN Retrospective and observational secondary data analyses. METHODS We studied 27,972 women veterans from 118 facilities with diagnosed cardiovascular conditions in fiscal year 2001 (FY2001) using merged Medicare claims and Veterans Health Administration (VHA) data merged with the 1999 VHA Survey of Primary Care Practices and the 2001 VHA Survey of Women Veterans Health Programs and Practices. The dependent variable was a binary indicator for diagnosed depression during FY2001 at the individual level. We used a multilevel logistic regression model to control for clustering of women veterans within facilities. Individual-level independent variables included demographics, socioeconomic characteristics, and chronic physical conditions. RESULTS Overall, 27% of women veterans using the VHA were diagnosed as having depression in FY2001. Across facilities, rates of diagnosed depression varied from 13% to 41%. After controlling for individual-level and facility-level independent variables, women veterans who were served in separate women's health clinics with integrated physical and mental healthcare were more likely to have diagnosed depression. The adjusted odds ratio was 1.12 (95% confidence interval, 1.01-1.25). CONCLUSIONS Existing women-specific VHA organizational features with integration of primary care and mental health seem effective in diagnosing depression. Emerging patient-centered medical home models may facilitate diagnosis and treatment of mental health issues among women with complex chronic conditions.
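A schematic of the multilevel logistic model described above; the notation is ours, not the paper's. For woman i in facility j, with individual-level covariates x_ij (demographics, socioeconomic characteristics, chronic conditions), facility-level covariates z_j (including the women's-health-clinic organizational indicators), and a facility random intercept u_j capturing the clustering of patients within facilities:

\[
\operatorname{logit} \Pr(\text{depression}_{ij} = 1)
  = \beta_0 + \boldsymbol{\beta}^{\top} \mathbf{x}_{ij} + \boldsymbol{\gamma}^{\top} \mathbf{z}_{j} + u_j,
\qquad u_j \sim \mathcal{N}(0, \sigma_u^2).
\]

The reported adjusted odds ratio of 1.12 corresponds to exponentiating the coefficient of the integrated-clinic indicator in z_j.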
Apple lined up another record quarter (shocker) selling an incredible 20.34 million iPhones, 9.25 million iPads and 3.95 million Macs. iPods were down 20% YOY to 7.54M. The company earned $7.31 billion in net profit on revenue of $28.57 billion for the June quarter. Both are records for Apple…and it isn’t even a holiday quarter. “We’re thrilled to deliver our best quarter ever, with revenue up 82 percent and profits up 125 percent,” said Steve Jobs, Apple’s CEO. “Right now, we’re very focused and excited about bringing iOS 5 and iCloud to our users this fall.” Press release and charts follow: Apple Reports Third Quarter Results All-Time Record Revenue and Earnings iPhone Sales Grow 142 Percent; iPad Sales Grow 183 Percent CUPERTINO, Calif.–(BUSINESS WIRE)–Apple® today announced financial results for its fiscal 2011 third quarter ended June 25, 2011. The Company posted record quarterly revenue of $28.57 billion and record quarterly net profit of $7.31 billion, or $7.79 per diluted share. These results compare to revenue of $15.70 billion and net quarterly profit of $3.25 billion, or $3.51 per diluted share, in the year-ago quarter. Gross margin was 41.7 percent compared to 39.1 percent in the year-ago quarter. International sales accounted for 62 percent of the quarter’s revenue. “Right now, we’re very focused and excited about bringing iOS 5 and iCloud to our users this fall.” The Company sold 20.34 million iPhones in the quarter, representing 142 percent unit growth over the year-ago quarter. Apple sold 9.25 million iPads during the quarter, a 183 percent unit increase over the year-ago quarter. The Company sold 3.95 million Macs during the quarter, a 14 percent unit increase over the year-ago quarter. Apple sold 7.54 million iPods, a 20 percent unit decline from the year-ago quarter. “We’re thrilled to deliver our best quarter ever, with revenue up 82 percent and profits up 125 percent,” said Steve Jobs, Apple’s CEO. “Right now, we’re very focused and excited about bringing iOS 5 and iCloud to our users this fall.” “We are extremely pleased with our performance which drove quarterly cash flow from operations of $11.1 billion, an increase of 131 percent year-over-year,” said Peter Oppenheimer, Apple’s CFO. “Looking ahead to the fourth fiscal quarter of 2011, we expect revenue of about $25 billion and we expect diluted earnings per share of about $5.50.” Apple will provide live streaming of its Q3 2011 financial results conference call beginning at 2:00 p.m. PDT on July 19, 2011 at www.apple.com/quicktime/qtv/earningsq311. This webcast will also be available for replay for approximately two weeks thereafter. This press release contains forward-looking statements [blah blah…] Apple designs Macs, the best personal computers in the world, along with OS X, iLife, iWork and professional software. Apple leads the digital music revolution with its iPods and iTunes online store. Apple has reinvented the mobile phone with its revolutionary iPhone and App Store, and has recently introduced iPad 2 which is defining the future of mobile media and computing devices. NOTE TO EDITORS: For additional information visit Apple’s PR website (www.apple.com/pr), or call Apple’s Media Helpline at (408) 974-2042. © 2011 Apple Inc. All rights reserved. Apple, the Apple logo, Mac, Mac OS and Macintosh are trademarks of Apple. Other company and product names may be trademarks of their respective owners.
Apple Inc.
UNAUDITED CONDENSED CONSOLIDATED STATEMENTS OF OPERATIONS
(In millions, except share amounts which are reflected in thousands and per share amounts)

                                              Three Months Ended        Nine Months Ended
                                             June 25,    June 26,     June 25,    June 26,
                                                2011        2010         2011        2010
Net sales                                   $ 28,571    $ 15,700     $ 79,979    $ 44,882
Cost of sales (1)                             16,649       9,564       47,541      26,710
Gross margin                                  11,922       6,136       32,438      18,172
Operating expenses:
  Research and development (1)                   628         464        1,784       1,288
  Selling, general and administrative (1)      1,915       1,438        5,574       3,946
  Total operating expenses                     2,543       1,902        7,358       5,234
Operating income                               9,379       4,234       25,080      12,938
Other income and expense                         172          58          334         141
Income before provision for income taxes       9,551       4,292       25,414      13,079
Provision for income taxes                     2,243       1,039        6,115       3,374
Net income                                  $  7,308    $  3,253     $ 19,299    $  9,705
Earnings per common share:
  Basic                                     $   7.89    $   3.57     $  20.91    $  10.69
  Diluted                                   $   7.79    $   3.51     $  20.63    $  10.51
Shares used in computing earnings per share:
  Basic                                      926,108     912,197      922,917     907,762
  Diluted                                    937,810     927,361      935,688     923,341

(1) Includes stock-based compensation expense as follows:
  Cost of sales                             $     52    $     38     $    155    $    112
  Research and development                  $    119    $     80     $    336    $    240
  Selling, general and administrative       $    113    $    101     $    379    $    303

Apple Inc.
UNAUDITED CONDENSED CONSOLIDATED BALANCE SHEETS
(In millions, except share amounts)

                                                        June 25, 2011   September 25, 2010
ASSETS:
Current assets:
  Cash and cash equivalents                                $  12,091          $  11,261
  Short-term marketable securities                            16,304             14,359
  Accounts receivable, less allowances of $55
    in each period                                             6,102              5,510
  Inventories                                                     889              1,051
  Deferred tax assets                                           1,892              1,636
  Vendor non-trade receivables                                  5,369              4,414
  Other current assets                                          4,251              3,447
    Total current assets                                       46,898             41,678
Long-term marketable securities                                47,761             25,391
Property, plant and equipment, net                              6,749              4,768
Goodwill                                                          741                741
Acquired intangible assets, net                                 1,169                342
Other assets                                                    3,440              2,263
    Total assets                                           $ 106,758          $  75,183

LIABILITIES AND SHAREHOLDERS’ EQUITY:
Current liabilities:
  Accounts payable                                         $  15,270          $  12,015
  Accrued expenses                                              7,597              5,723
  Deferred revenue                                              3,992              2,984
    Total current liabilities                                  26,859             20,722
Deferred revenue – non-current                                  1,407              1,139
Other non-current liabilities                                   9,149              5,531
    Total liabilities                                          37,415             27,392
Commitments and contingencies
Shareholders’ equity:
  Common stock, no par value; 1,800,000,000 shares
    authorized; 926,903,779 and 915,970,050 shares
    issued and outstanding, respectively                       12,715             10,668
  Retained earnings                                            56,239             37,169
  Accumulated other comprehensive income/(loss)                   389                (46)
    Total shareholders’ equity                                 69,343             47,791
    Total liabilities and shareholders’ equity             $ 106,758          $  75,183

Apple Inc.
UNAUDITED CONDENSED CONSOLIDATED STATEMENTS OF CASH FLOWS
(In millions)

                                                              Nine Months Ended
                                                        June 25, 2011   June 26, 2010
Cash and cash equivalents, beginning of the period         $ 11,261        $  5,263
Operating activities:
  Net income                                                  19,299           9,705
  Adjustments to reconcile net income to cash
  generated by operating activities:
    Depreciation, amortization and accretion                   1,271             698
    Stock-based compensation expense                             870             655
    Deferred income tax expense                                2,232           1,298
  Changes in operating assets and liabilities:
    Accounts receivable, net                                    (592)            (79)
    Inventories                                                  162            (487)
    Vendor non-trade receivables                                (955)         (1,256)
    Other current and non-current assets                      (1,551)         (1,001)
    Accounts payable                                           2,480           2,812
    Deferred revenue                                           1,276             806
    Other current and non-current liabilities                  2,608            (239)
      Cash generated by operating activities                  27,100          12,912
Investing activities:
  Purchases of marketable securities                         (75,133)        (41,318)
  Proceeds from maturities of marketable securities           16,396          19,758
  Proceeds from sales of marketable securities                34,301          14,048
  Payments made in connection with business
    acquisitions, net of cash acquired                             0            (615)
  Payments for acquisition of property, plant
    and equipment                                             (2,615)         (1,245)
  Payments for acquisition of intangible assets                 (266)            (63)
  Other                                                           34             (36)
      Cash used in investing activities                      (27,283)         (9,471)
Financing activities:
  Proceeds from issuance of common stock                         577             733
  Excess tax benefits from equity awards                         915             652
  Taxes paid related to net share settlement
    of equity awards                                            (479)           (384)
      Cash generated by financing activities                   1,013           1,001
Increase in cash and cash equivalents                            830           4,442
Cash and cash equivalents, end of the period                $ 12,091        $  9,705

Supplemental cash flow disclosure:
  Cash paid for income taxes, net                           $  2,563        $  2,657

Apple Inc.
Q3 2011 Unaudited Summary Data

Operating Segments           Q2 2011            Q3 2010            Q3 2011         Sequential Change   Year/Year Change
                         Units K   Rev $M   Units K   Rev $M   Units K   Rev $M     Units     Rev       Units     Rev
Americas                   1,217    9,323     1,358    6,227     1,487   10,126       22%      9%          9%     63%
Europe                       995    6,027       914    4,160       922    7,098       -7%     18%          1%     71%
Japan                        155    1,383       129      910       150    1,510       -3%      9%         16%     66%
Asia Pacific                 596    4,743       394    1,825       620    6,332        4%     34%         57%    247%
Retail                       797    3,191       677    2,578       768    3,505       -4%     10%         13%     36%
Total Operating Segments   3,760   24,667     3,472   15,700     3,947   28,571        5%     16%         14%     82%

Product Summary              Q2 2011            Q3 2010            Q3 2011         Sequential Change   Year/Year Change
                         Units K   Rev $M   Units K   Rev $M   Units K   Rev $M     Units     Rev       Units     Rev
Desktops (1)               1,009    1,441     1,004    1,301     1,155    1,580       14%     10%         15%     21%
Portables (2)              2,751    3,535     2,468    3,098     2,792    3,525        1%      0%         13%     14%
Subtotal CPUs              3,760    4,976     3,472    4,399     3,947    5,105        5%      3%         14%     16%
iPod                       9,017    1,600     9,406    1,545     7,535    1,325      -16%    -17%        -20%    -14%
Other Music Related
  Products and Services (3)    —    1,634         —    1,214         —    1,571         —     -4%           —     29%
iPhone and Related
  Products and Services (4) 18,647 12,298     8,398    5,334    20,338   13,311        9%      8%        142%    150%
iPad and Related
  Products and Services (5) 4,694   2,836     3,270    2,166     9,246    6,046       97%    113%        183%    179%
Peripherals and Other
  Hardware (6)                 —      580         —      396         —      517         —    -11%           —     31%
Software, Service and
  Other Sales (7)              —      743         —      646         —      696         —     -6%           —      8%
Total Apple                        24,667            15,700             28,571                16%                 82%

(1) Includes iMac, Mac mini, Mac Pro and Xserve product lines.
(2) Includes MacBook, MacBook Air and MacBook Pro product lines.
(3) Includes sales from the iTunes Store, App Store, and iBookstore in addition to sales of iPod services and Apple-branded and third-party iPod accessories.
(4) Includes revenue recognized from iPhone sales, carrier agreements, services, and Apple-branded and third-party iPhone accessories.
(5) Includes revenue recognized from iPad sales, services, and Apple-branded and third-party iPad accessories.
(6) Includes sales of displays, wireless connectivity and networking solutions, and other hardware accessories.
(7) Includes sales from the Mac App Store in addition to sales of other Apple-branded and third-party Mac software and Mac and Internet services.

K = Units in thousands; $M = Amounts in millions

Contacts
Apple
Steve Dowling, 408-974-1896 (Press), [email protected]
Nancy Paxton, 408-974-5420 (Investor Relations), [email protected]
Joan Hoover, 408-974-4570 (Investor Relations), [email protected]
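A quick arithmetic check of the headline growth figures against the tables above (dollar figures in millions, unit figures in thousands, all taken from the release):

# Sanity-check the quoted growth rates from the reported figures.
q3_2011_revenue, q3_2010_revenue = 28_571, 15_700
q3_2011_net_income, q3_2010_net_income = 7_308, 3_253
q3_2011_iphones, q3_2010_iphones = 20_338, 8_398

revenue_growth = q3_2011_revenue / q3_2010_revenue - 1        # -> 0.82, "revenue up 82 percent"
profit_growth = q3_2011_net_income / q3_2010_net_income - 1   # -> 1.25, "profits up 125 percent"
iphone_unit_growth = q3_2011_iphones / q3_2010_iphones - 1    # -> 1.42, "142 percent unit growth"

print(f"{revenue_growth:.0%}, {profit_growth:.0%}, {iphone_unit_growth:.0%}")  # 82%, 125%, 142%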
Real Housewives of Atlanta star Kandi Burruss revealed her daughter Riley’s amazing weight loss transformation. The 42-year-old posted a before and after photo on Instagram and said that Riley started taking fitness seriously about 10 months ago. Riley went from 220 pounds to 168 pounds, losing a total of 52 pounds in less than a year. Kandi suggests in the lengthy Instagram post that Riley didn’t rely on yo-yo dieting and dropped the weight the right way. She trained five days a week and reduced her calorie intake. The proud mother blasted social media trolls who mocked Riley for her weight over the years. The singer-songwriter revealed that Riley’s tall stature concealed the weight, making it difficult to tell she was over 200 pounds. Kandi implies that Riley will spill the beans on how she lost all the weight, concluding the caption with the following. Riley shared the post on her own Instagram page and stated that she will continue her weight loss journey. Real Housewives of Atlanta Season 11 will likely feature Riley’s weight loss as the upcoming series is reportedly filming. Riley’s trainer gave some insight into her struggles in his own Instagram post via Hollywood Life. The trainer said that Riley pushed herself to the point where she fainted and was reluctant to keep training at some of the sessions. Kandi Burruss recently ran into a pregnant Kenya Moore at the airport. Moore announced in the Real Housewives of Atlanta Season 10 reunion that she is pregnant with Marc Daly. While some fans speculated whether Moore was using a surrogate, it is clear from the photo that the 47-year-old reality TV star is pregnant. Burruss seems to be on a positive note heading into Season 11 as she resumed following Porsha Williams on Instagram, suggesting that the feud may be over.
Breastfeeding update 1: immunology, nutrition, and advocacy. When one reviews the wealth of information about the advantages of breastfeeding, there can be no doubt that this practice is healthy for both mother and infant. It is time for the "culture of medicine" to move beyond slogans. It is time for enthusiastic encouragement backed by meaningful action and time to move forward from personal perceptions and experiences to demonstrate to the culture at large that breastfeeding is normal, expected, and achievable.
/**
 * Opens the stand-by panel if the queue is empty, and if not, prepares the next file.
 */
private void checkForMoreFiles() {
    if (queue.isEmpty()) {
        currentTrackCutter = null;
        window.openStandByPanel();
        queueSize = 0;
        queuePosition = 0;
    } else {
        prepareNextFile();
    }
}
There is often a need for lightweight protective material, such as drop cloths, to cover floors and furniture during moving, construction or other activities such as painting and decorating. One persistent problem with such material is that it must be relatively impervious to liquids such as water and paint. The prior art has dealt with the problem of waterproofing lightweight cloth materials by placing a plastic coating on one or both sides of a paper or cloth material. Unfortunately, although this makes the product waterproof, it also makes it very slippery. If painters cannot place a ladder on the material without fear that it will slip out from under them, they are not apt to use it. There have been some prior art attempts at making non-slip surfaces, but these relate mostly to roofing materials or to more permanent materials such as floor tapes and the like. Accordingly, what is needed is a lightweight, reusable, puncture-resistant, cloth-like material that is generally impervious to water and other liquids while providing at least one non-slip surface.
Real Time CHIS Model for Efficient Sugarcane Plant Growth and Yield Estimation using Satellite Images

Research on growth estimation for sugarcane plants is an active topic, and the problem of plant growth and yield estimation for sugarcane is well studied. A number of solutions have been recommended by different researchers, but they still suffer from poor accuracy: existing methods measure plant growth from rainfall and temperature alone, which yields poor performance. To improve performance, an efficient Climate Hydro Image Soil Model (CHISM) is presented. The model considers several properties towards plant growth, namely climate conditions (temperature and humidity), hydrologic features (rainfall and water poured) and soil conditions. The method uses satellite images to obtain the soil condition by applying image processing techniques; the remaining features are obtained from the regional data set provided by the agriculture sector. Using all these features, the method estimates the influence of each factor considered: it computes a rainfall influence measure (RIM), water influence measure (WIM), temperature influence measure (TIM), humidity influence measure (HIM) and soil influence measure (SIM). Using all these measures, the model computes the plant growth rate (PGR) and yield rate (YR) in different time windows, and according to the estimated measures, the model performs water regulation. The method improves the performance of plant growth estimation and crop yield.

I. INTRODUCTION The agricultural industry has a strong impact on the world economy as well as on social mobility. The population increases every year, and the agricultural industry has to work to meet this social requirement. A country like India has a large population and struggles with scarcity in commodities like wheat and rice, while the agricultural community cultivates plants in the available agricultural area. In India, 68% of the geographic area is covered by agriculture and forest, but the growing population replaces agricultural area with residences and industries, reducing the yield and output of the entire agricultural sector; it is therefore necessary to come up with a strategic solution to this problem. The growth of the agricultural sector depends heavily on various factors: for example, the yield of any plant depends on temperature, humidity, rainfall, water poured, soil type, evaporation and so on. By considering all these features, the possible growth and the yield that would be produced can be measured, and the necessary actions can be performed. There are a number of approaches for plant growth and yield estimation, but they consider only limited features; for example, some methods use only area and rainfall in predicting yield and plant growth. Considering these issues, a data mining approach can be adapted to the problem. Similarly, soil type identification and plant growth estimation can be performed based on satellite images, which are already used for several conditions and problems; agricultural growth can also be assessed from satellite images.
Satellite images are used in predicting rainfall and climate change, so by using them, plant growth estimation and yield prediction can be performed. This paper discusses such an approach to yield estimation for sugarcane plants. The proposed CHISM model has several stages of plant growth estimation and yield prediction. The method maintains the rate of yield produced in different time windows for each region, and such records are used in estimating the yield and growth of plants. The satellite images serve two purposes: first, the soil type can be identified from the satellite image and plant growth estimation performed; second, from satellite images over various time windows, changes in climate and rainfall can be predicted. This supports water regulation and allows water management to be performed efficiently. Both tasks are performed by applying image processing techniques. The CHISM model computes different influence measures for plant growth estimation, considering the influence of different factors in achieving higher growth and yield of sugarcane. Accordingly, the method computes a rainfall influence measure (RIM), water influence measure (WIM), temperature influence measure (TIM), humidity influence measure (HIM) and soil influence measure (SIM). Using all these measures, the model computes the plant growth rate (PGR) and yield rate (YR) in different time windows. Among these, the temperature, humidity and rainfall features are measured from features extracted from satellite images using image processing techniques. The detailed approach is presented in the next section.

II. RELATED WORKS A number of solutions have been recommended by various researchers; such methods are discussed in this section. In, the author presents a crop yield estimation framework that considers satellite images: various factors are obtained from the images using different image processing techniques, and the results are used to perform crop yield estimation. In, the author presents a mathematical model for crop yield estimation that uses remote sensing techniques; features are extracted from the captured images, and the model uses an energy-balance equation to improve the performance of crop yield estimation. In, the author presents an image processing framework for Android phones to estimate kiwifruit yield, considering the area of cultivation and the number of kiwifruit; using these two, the method estimates the fruit yield. In, the author presents a crop yield estimation approach for wheat based on background subtraction. Similarly, a vision-oriented approach to infection identification on plants using colour features is presented in; the image is segmented using a k-means approach, and GLCM features are used for classification of the disease-affected regions. In, an IoT-based plant growth estimation algorithm is presented, which uses colour, texture and shape features of leaves to perform pattern matching towards plant growth estimation. In, the author presents a yield estimation algorithm for vineyards that uses image processing: the area is clustered under different conditions, a set of weights is measured for each cluster, and image segmentation is used to perform yield estimation.
In, the author presents a detailed review of various image processing methods that support the problem of crop yield estimation, and surveys the adoption of communication facilities in improving crop yield prediction. "Plant Diseases Recognition Based on Image Processing Technology" presents a combined approach of image processing and region-growing methods for yield estimation; the system uses different linear regression methods together with image-processing feature extraction and produces noticeable results on crop yield estimation. In, crop yield estimation is approached with an improved deep learning pipeline; the method optimises the crop yield parameters according to threshold, output size and so on, and the network is trained on a number of images and evaluated for its performance on various parameters. In, the author presents a yield estimation algorithm for the red macroalga Kappaphycus alvarezii using satellite images in Indonesia; it is observed that the carrageenan yield is higher when the temperature is moderate and the salinity is higher, and that the biomass value should be low for better yield growth. In, the author presents an approach for estimating evapotranspiration using remote sensing: the method captures remote sensing data, considers the evaporation occurring at the water source together with the ratio of water used during the green season obtained from satellite maps, and from these values estimates crop water consumption. In, the author presents a crop yield estimation algorithm using AI and SI, predicting crop yield from satellite images; the method extracts various temporal features from the images, such as temperature, humidity, area of cultivation and water source, and a CNN is used to predict the crop yield. In, the problem of crop yield estimation is handled with a combination of random forest and decision tree algorithms, validated on a data set obtained from Terra; the decision tree generates the trained model while the random forest predicts the result, achieving efficient decisions on plant growth. In, a satellite-image-based yield estimation and crop classification method is presented, extracting colour and texture features to perform image classification towards yield estimation. In, the author presents a crop yield estimation algorithm that uses satellite images and considers contextual information, working towards fixing compensation for farmers after heavy losses; the method identifies various factors and performs classification according to crop properties, with the features extracted from remote sensing images. In, the author performs yield estimation on maize using satellite images of Zimbabwe; yield is inferred from data obtained from the small-scale commercial farming sector (SSCF), and the method is validated against different national yield models with different data sets. In, the author presents a corn height estimation scheme towards yield prediction; a UAV data set containing RGB data is used both for evaluation and for estimating the corn height.
In, the author presents a satellite-image-based crop yield estimation model to support fertiliser application on corn fields. Similarly, in, the author presents a satellite-image-based chlorophyll content estimation scheme, the SMLR-PSO model; the method uses spectral images, extracts various features using random field segmentation to estimate the dependencies, and applies a PSO algorithm for prediction to produce efficient results. All the methods discussed struggle to achieve higher yield estimates and suffer from poor accuracy.

A. CHIS Sugarcane Plant Growth and Yield Estimation Model: The proposed CHIS (Climate Hydrology Image Soil) model uses agricultural data obtained from different regions of the country, together with satellite images obtained from the weather research centre, from which different features are extracted. The available data are split into a number of time domains and region spaces, and for each space the method estimates different influence measures. Similarly, the method preprocesses the input images and performs segmentation to extract features. The architecture of the proposed CHIS model is presented in Figure 1, which shows the functional components; each functional part is discussed in detail in this section.

B. Agriculture Feature Extraction: The given agricultural data set ADs is taken for feature extraction. The data set holds traces for a number of years across different regions of the country, and may contain noisy or incomplete values; such values are identified, and records with incomplete features are eliminated to produce a preprocessed data set. From the preprocessed set, the method splits the records into a number of time spaces (the records may cover different granularities such as year and month) and then into region spaces, and for each region space the features used to compute the various measures are extracted. Consider an agricultural data set ADs containing 30 years of data, representing 30 time spaces; first, the list of features and facts available in the data set is identified. [Equation: indexes the logs available in Nrs into different time stamp clusters according to the time value present in each log.] The generated clusters are used to estimate various measures towards plant growth estimation.

C. Satellite Image Feature Extraction: The features from the satellite image are extracted in this stage. The satellite image may contain noise introduced by the capturing device, so the noisy pixels must be removed; this is performed by applying a Gabor filter. The image is then processed with histogram equalisation to improve its quality. Next, the method generates a gray scale image and performs gray threshold segmentation, which groups the pixels of the image according to their gray value. From the grouped image, the method identifies the regions representing water particles, and the soil type is detected according to the gray scale values present in the image. The extracted features are used to perform plant growth estimation and crop yield estimation.

Algorithm (gray threshold segmentation):
Identify the maximum gray scale value available in the image.
Compute the histogram of GI as GIHist = Histogram(GI).
Choose the two least gray values.
Choose the maximum gray scale value as the threshold WT.
Compute the segmented image Simg by performing segmentation with WT.
Stop.

The algorithm above shows how plant growth on sugarcane cultivation is measured: the method estimates the ratios of various factors, based on which plant growth estimation is performed.

E. CHIS Crop Yield Estimation: The crop yield of sugarcane is measured according to different factors: water poured, rainfall, soil type, temperature, humidity, area of cultivation and so on. For each factor, the method computes an influence measure. The rainfall influence is measured from the ratio of historical rainfall to the rainfall obtained in the current time window. Similarly, the temperature influence is measured from the currently recorded temperature and the mean temperature of the region; the humidity influence measure is computed accordingly.

Algorithm sketch (reconstructed from the original inline annotations; the full equations are not reproduced):
Compute water influence measure WIM = Dist(volume of water poured in the current input sample, average water poured in the logs).
Compute temperature influence measure TIM = Dist(current temperature, average temperature).
Compute humidity influence measure HIM = Dist(monitored humidity, average humidity).
Compute the yield value Yv from the influence measures.
Stop.

The algorithm above shows how the yield is estimated from the various influence measures computed.

F. CHIS Water Regulation: Water regulation becomes more important because the shortfall of rain increases every year. To achieve higher plant growth and yield, it is necessary to supply the required volume of water to the plants. This volume is determined by the rainfall, temperature and humidity of the specific area of cultivation; according to the values of these factors, a specific volume of water is regulated to the plant area. The corresponding algorithm computes the volume of water to be regulated to the cultivation area from these factors.

III. RESULTS AND DISCUSSION The proposed CHIS model has been implemented in Matlab and evaluated for its performance under various factors and data sets. The agricultural data were collected for the Coimbatore region, as maintained by the agricultural department of India, and the satellite images were obtained from the same source. Using both data sets, the performance of the proposed algorithm was evaluated. The performance on plant growth estimation, crop yield estimation and water regulation was measured for the various methods and is presented in Table 2; the proposed CHIS model produced higher performance on all the parameters considered.

[Fig. 3: Performance on plant growth estimation.] The performance on plant growth estimation is compared between the different methods in Figure 3; the proposed CHIS model achieved higher performance than the other methods.

[Fig. 4: Performance on crop yield estimation.] The performance on crop yield was measured and compared with the results of the other methods; the proposed CHIS model produced higher crop yield than the other methods.

[Fig. 5: Performance on water regulation.] The performance on water regulation was measured for the different methods, and the results are compared in Figure 5.
The proposed CHIS model produced higher performance than the other methods.

IV. CONCLUSION In this paper, an efficient CHIS model is presented to perform plant growth and yield estimation. The model maintains traces of previous agricultural growth and yield obtained from different geographic areas of India; in particular, records from the Coimbatore region were collected and grouped under different time stamps, and the clustered data were used for prediction and estimation. Using these data, the method computes different influence measures such as the temperature influence measure (TIM), humidity influence measure (HIM), water influence measure (WIM) and rainfall influence measure (RIM). Using these measures, the method estimates the possible yield and the growth of the sugarcane, and likewise estimates the volume of water to be regulated to the plants. The adoption of the CHIS model improves performance in estimating crop yield, plant growth and water regulation; the proposed method improves growth estimation and yield estimation over the other methods.
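A minimal sketch of the influence-measure computation described above. The paper defines each measure only as a distance between the current observation and its historical average, and does not give the exact distance, normalisation, or combination rule, so those choices below are illustrative assumptions rather than the authors' implementation.

# Illustrative sketch of CHIS-style influence measures (assumed formulas).
from statistics import mean

def influence(current, history):
    """Distance between the current reading and the historical average,
    scaled by the average so different units are comparable (assumption)."""
    avg = mean(history)
    return abs(current - avg) / avg if avg else 0.0

def estimate_yield(sample, logs, weights):
    """Combine per-factor influence measures into a single yield score.
    `weights` are hypothetical per-factor weights; the paper does not
    specify how RIM/WIM/TIM/HIM are combined into the yield value Yv."""
    measures = {
        'RIM': influence(sample['rainfall'], logs['rainfall']),
        'WIM': influence(sample['water_poured'], logs['water_poured']),
        'TIM': influence(sample['temperature'], logs['temperature']),
        'HIM': influence(sample['humidity'], logs['humidity']),
    }
    # Assumption: larger deviations from historical norms depress the yield score.
    penalty = sum(weights[k] * m for k, m in measures.items())
    return max(0.0, 1.0 - penalty), measures

# Example call with made-up readings and equal weights:
sample = {'rainfall': 80.0, 'water_poured': 120.0, 'temperature': 31.0, 'humidity': 0.55}
logs = {'rainfall': [90.0, 110.0, 100.0], 'water_poured': [100.0, 95.0, 105.0],
        'temperature': [29.0, 30.0, 31.0], 'humidity': [0.60, 0.65, 0.55]}
weights = {k: 0.25 for k in ('RIM', 'WIM', 'TIM', 'HIM')}
yield_score, measures = estimate_yield(sample, logs, weights)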
/**
 * Copies a file into another file.
 *
 * @param srcFile the input file to copy
 * @param destFile the output file to copy to
 * @throws IOException if an I/O exception occurs
 */
public static void copy(File srcFile, File destFile) throws IOException {
    if (srcFile.equals(destFile)) {
        throw new IOException("You cannot copy a file onto itself");
    }
    byte[] buf = new byte[4096];
    FileInputStream in = null;
    FileOutputStream out = null;
    try {
        in = new FileInputStream(srcFile);
        out = new FileOutputStream(destFile);
        // read() returns -1 at end of stream; looping on available() can
        // silently truncate the copy and misreport the number of bytes read.
        int n;
        while ((n = in.read(buf)) > 0) {
            out.write(buf, 0, n);
        }
    } finally {
        if (in != null) {
            in.close();
        }
        if (out != null) {
            out.close();
        }
    }
}
import { SortDirection } from '../models/types';

export const delay = (ms: number) =>
  new Promise<void>(r => setTimeout(() => r(), ms));

export const compare = (v1, v2, d: SortDirection) => {
  if (v1 === v2) {
    return 0;
  }
  if (v1 > v2) {
    return d === 'asc' ? 1 : -1;
  }
  if (v1 < v2) {
    return d === 'asc' ? -1 : 1;
  }
  // Incomparable values (e.g. NaN) are treated as equal.
  return 0;
};

export const sort = <T>(selector: (v: T) => any, d: SortDirection) => (
  v1: T,
  v2: T,
) => {
  return compare(selector(v1), selector(v2), d);
};
def download_from_onda(scene_id: str, destination: str):
    """Order a scene from the ONDA catalogue and download it to `destination`."""
    # Look up the catalogue entry for the requested scene.
    catalog = search_onda_catalog_by_scene_id(scene_id)
    # Authenticate with the ONDA credentials from the local configuration.
    credentials = get_credentials().get('onda')
    catalog.set_credentials(credentials.get('username'), credentials.get('password'))
    # Place an order for the product, then download it to the destination path.
    catalog.order()
    catalog.download(destination)
import { ConsoleEnd, ConsoleStyle } from '../constants';
import { text } from '../format';

export function reset(value: any) {
    return text(ConsoleStyle.Reset, ConsoleStyle.Reset, value);
}

export function bold(value: any) {
    return text(ConsoleStyle.Bold, ConsoleEnd.Bold, value);
}

// TODO: Test this
export function dim(value: any) {
    return text(ConsoleStyle.Dim, ConsoleEnd.Bold, value);
}

export function italic(value: any) {
    return text(ConsoleStyle.Italic, ConsoleEnd.Italic, value);
}

export function underline(value: any) {
    return text(ConsoleStyle.Underline, ConsoleEnd.Underline, value);
}

export function inverse(value: any) {
    return text(ConsoleStyle.Inverse, ConsoleEnd.Inverse, value);
}

export function hidden(value: any) {
    return text(ConsoleStyle.Hidden, ConsoleEnd.Hidden, value);
}

export function strikethrough(value: any) {
    return text(ConsoleStyle.Strikethrough, ConsoleEnd.Strikethrough, value);
}
import routeDispatcher from '../route-dispatcher'
import store from '../../core/store'
import Routes from '../../constants/routes'
import { RouteValue } from '../../types/core'
import { checklistDetailRequestData } from '@/actions/checklist-detail'

jest.mock('../../core/store')

describe('routeDispatcher', () => {
  it('should dispatch checklistDetailRequestData for the checklist detail route', async () => {
    await routeDispatcher(Routes.CHECKLIST_DETAIL as RouteValue, { id: '1' })
    expect(store.dispatch).toHaveBeenCalledWith(checklistDetailRequestData('1'))
  })
})
// src/api/services/updateBook/UpdateBookService.ts
import { Book } from "../../entities/Book";

class UpdateBookService {
  async execute({
    idbook,
    id,
    book_name,
    author,
    categories,
    book_rating,
    publisher,
    year,
    pages,
    volume,
    edition,
    language,
    file,
  }) {
    const findBookById = await Book.findByPk(idbook);

    if (!findBookById) {
      // Error handling: the book does not exist
      throw new Error("This book does not exist");
    }

    const updateBook = await findBookById.update({
      id,
      book_name,
      author,
      categories,
      book_rating,
      publisher,
      year,
      pages,
      volume,
      edition,
      language,
      file,
    });

    return updateBook;
  }
}

export { UpdateBookService };
Remote Programmatic vCloud Forensics: A Six-Step Collection Process and a Proof of Concept With the increasing popularity of cloud services and their potential to be either the target or the tool in a cybercrime activity, organizational cloud services users need to ensure that they are able to collect evidential data should they be involved in litigation or a criminal investigation. In this paper, we seek to contribute to a better understanding of the technical issues and processes regarding collection of evidential data in the cloud computing environment. Using VMware vCloud as a case study, we describe the various artefacts available in the cloud environment and identify several forensic preservation considerations for forensics practitioners. We then propose a six-step process for the remote programmatic collection of evidential data to ensure that as few changes as possible are made during evidence collection and that no potential evidence is missed. The six-step process is implemented in a proof-of-concept application to demonstrate the utility of the process.
The inventive concept relates to semiconductor devices. More particularly, the inventive concept relates to semiconductor devices having a dummy active region. There is a growing demand for semiconductor devices that operate at higher speeds and/or that have higher degrees of integration. Accordingly, a great amount of importance is attached to the layout design of semiconductor devices as well as the circuit design of and techniques used for manufacturing today's semiconductor devices. A typical semiconductor device includes a semiconductor substrate, a device isolation pattern dividing the substrate into active regions, and electronic components disposed at the active regions and insulated from each other by the device isolation pattern. As semiconductor devices become more highly integrated, the widths of device isolation patterns are becoming smaller. One technique for forming a narrow device isolation pattern is a shallow trench isolation (STI) technique. In this technique, a trench is formed in the substrate, and the trench is filled with insulating material to divide an upper portion of the substrate into active regions. However, a device isolation pattern in a trench formed in a semiconductor substrate may stress the semiconductor substrate (specifically, the active regions). In the case in which the electronic components formed at the active regions are MOS transistors, for example, the stress generated by a device isolation pattern formed in a trench is imparted to the MOS transistors and the operating characteristics of the MOS transistors can be adversely affected by the stress. Furthermore, the MOS transistors may be concentrated within a predetermined region of the substrate. In this case, the stress imparted to a MOS transistor located in a central part of that region may be different from the stress imparted to a MOS transistor located at the periphery of that region.
# sudoku/sudoku_solver/views.py
from django.shortcuts import render
from django.http import HttpResponse
from django.template import Context, loader
import random
import copy
import json

from .models import Results


# Create your views here.
def index(request):
    # Sample insert into the database
    '''
    result1 = Results()
    result1.age = 20
    result1.student_name = "Sudhir"
    result1.task_selected = "Sudoku"
    result1.task_outcome = "Correct"
    result1.save()
    '''
    results = run(n=40)     # find puzzles with as few givens as possible.
    puzzle = best(results)  # use the best one of those puzzles.
    template = loader.get_template("sudoku_solver/index.html")
    # cellN maps to puzzle[(N-1)//9][(N-1)%9]: cell1..cell9 are row 0,
    # cell10..cell18 are row 1, and so on. This is equivalent to the
    # original hand-written 81-entry dictionary.
    context = {f'cell{i * 9 + j + 1}': puzzle[i][j]
               for i in range(9) for j in range(9)}
    return render(request, "sudoku_solver/index.html", context)


def homepage(request):
    return render(request, "sudoku_solver/homepage.html")


def homepage_2(request):
    return render(request, "sudoku_solver/homepage_2.html")


def task(request):
    return render(request, "sudoku_solver/game.html")


def intro_task(request):
    return render(request, "sudoku_solver/intro_task.html")


def intro_game(request):
    return render(request, "sudoku_solver/intro_game.html")


def second_task(request):
    return render(request, "sudoku_solver/second_game.html")


"""
SUDOKU (NUMBER PLACE) PUZZLE GENERATOR
by <NAME>
November 12, 2005

This program is released into the public domain.
Revision 3
"""

sample = [
    [3, 4, 1, 2, 9, 7, 6, 8, 5],
    [2, 5, 6, 8, 3, 4, 9, 7, 1],
    [9, 8, 7, 1, 5, 6, 3, 2, 4],
    [1, 9, 2, 6, 7, 5, 8, 4, 3],
    [8, 7, 5, 4, 2, 3, 1, 9, 6],
    [6, 3, 4, 9, 1, 8, 2, 5, 7],
    [5, 6, 3, 7, 8, 9, 4, 1, 2],
    [4, 1, 9, 5, 6, 2, 7, 3, 8],
    [7, 2, 8, 3, 4, 1, 5, 6, 9],
]

"""
Randomly arrange numbers in a grid while making all rows, columns and
squares (sub-grids) contain the numbers 1 through 9.
For example, "sample" (above) could be the output of this function.
"""
def construct_puzzle_solution():
    # Loop until we're able to fill all 81 cells with numbers, while
    # satisfying the constraints above.
    while True:
        try:
            puzzle = [[0] * 9 for i in range(9)]             # start with blank puzzle
            rows = [set(range(1, 10)) for i in range(9)]     # set of available
            columns = [set(range(1, 10)) for i in range(9)]  # numbers for each
            squares = [set(range(1, 10)) for i in range(9)]  # row, column and square
            for i in range(9):
                for j in range(9):
                    # pick a number for cell (i,j) from the set of remaining available numbers
                    choices = rows[i].intersection(columns[j]).intersection(
                        squares[(int(i / 3)) * 3 + int(j / 3)])
                    choice = random.choice(list(choices))
                    puzzle[i][j] = choice
                    rows[i].discard(choice)
                    columns[j].discard(choice)
                    squares[(int(i / 3)) * 3 + int(j / 3)].discard(choice)
            # success! every cell is filled.
            return puzzle
        except IndexError:
            # if there is an IndexError, we have worked ourselves in a corner (we just start over)
            pass


"""
Randomly pluck out cells (numbers) from the solved puzzle grid, ensuring that any
plucked number can still be deduced from the remaining cells. For deduction to be
possible, each other cell in the plucked number's row, column, or square must not
be able to contain that number.
"""
def pluck(puzzle, n=0):

    """
    Answers the question: can the cell (i,j) in the puzzle "puz" contain the number
    in cell "c"?
    """
    def canBeA(puz, i, j, c):
        v = puz[int(c / 9)][c % 9]
        if puz[int(i)][int(j)] == v:
            return True
        if puz[int(i)][int(j)] in range(1, 10):
            return False
        for m in range(9):  # test row, col, square
            # if not the cell itself, and the mth cell of the group contains the value v, then "no"
            if not (m == int(c / 9) and j == c % 9) and puz[m][j] == v:
                return False
            if not (i == int(c / 9) and m == c % 9) and puz[i][m] == v:
                return False
            if not ((int(i / 3)) * 3 + int(m / 3) == int(c / 9)
                    and (int(j / 3)) * 3 + m % 3 == c % 9) \
                    and puz[(int(i / 3)) * 3 + int(m / 3)][(int(j / 3)) * 3 + m % 3] == v:
                return False
        return True

    """
    starts with a set of all 81 cells, and tries to remove one (randomly) at a time
    but not before checking that the cell can still be deduced from the remaining cells.
    """
    cells = set(range(81))
    cellsleft = cells.copy()
    while len(cells) > n and len(cellsleft):
        cell = random.choice(list(cellsleft))  # choose a cell from ones we haven't tried
        cellsleft.discard(cell)                # record that we are trying this cell

        # row, col and square record whether another cell in those groups could also take
        # on the value we are trying to pluck. (If another cell can, then we can't use the
        # group to deduce this value.) If all three groups are True, then we cannot pluck
        # this cell and must try another one.
        row = col = square = False

        for i in range(9):
            # integer row/column indices (cell / 9 is a float in Python 3)
            if i != int(cell / 9):
                if canBeA(puzzle, i, cell % 9, cell):
                    row = True
            if i != cell % 9:
                if canBeA(puzzle, int(cell / 9), i, cell):
                    col = True
            # the i-th cell of the square containing `cell`; the column base is
            # derived from the cell's column (fixed from the row-based arithmetic)
            sq_row = (int(int(cell / 9) / 3)) * 3 + int(i / 3)
            sq_col = (int((cell % 9) / 3)) * 3 + i % 3
            if not (sq_row == int(cell / 9) and sq_col == cell % 9):
                if canBeA(puzzle, sq_row, sq_col, cell):
                    square = True

        if row and col and square:
            continue  # could not pluck this cell, try again.
        else:
            # this is a pluckable cell!
            puzzle[int(cell / 9)][cell % 9] = 0  # 0 denotes a blank cell
            cells.discard(cell)  # remove from the set of visible cells (pluck it)
            # we don't need to reset "cellsleft" because if a cell was not pluckable
            # earlier, then it will still not be pluckable now (with less information
            # on the board).

    # This is the puzzle we found, in all its glory.
    return (puzzle, len(cells))


"""
That's it. If we want to make a puzzle we can do this:

    pluck(construct_puzzle_solution())

The following functions are convenience functions for doing just that...
"""


"""
This uses the above functions to create a new puzzle. It attempts to
create one with 28 (by default) given cells, but if it can't, it returns
one with as few givens as it is able to find.

This function actually tries making 100 puzzles (by default) and returns
all of them. The "best" function that follows this one selects the best
one of those.
"""
def run(n=2, iter=40):
    all_results = {}
    print("Constructing a sudoku puzzle.")
    print("* creating the solution...")
    a_puzzle_solution = construct_puzzle_solution()
    print(a_puzzle_solution)
    print("* constructing a puzzle...")
    for i in range(iter):
        puzzle = copy.deepcopy(a_puzzle_solution)
        (result, number_of_cells) = pluck(puzzle, n)
        all_results.setdefault(number_of_cells, []).append(result)
        if number_of_cells <= n:
            break
    return all_results


def best(set_of_puzzles):
    # Could run some evaluation function here. For now just pick
    # the one with the fewest "givens".
    return set_of_puzzles[min(set_of_puzzles.keys())][0]


def display(puzzle):
    for row in puzzle:
        print(' '.join([str(n or '_') for n in row]))


def end_game(request):
    # Get the post variables
    # solution = request.POST['solution']
    resu = request.POST.get('resu', False)
    hours = request.POST.get('hours', False)
    age = request.POST.get('age', False)
    gender = request.POST.get('gender', False)
    lass = request.POST.get('lass', False)
    subjects = request.POST.get('subjects', False)
    fav = request.POST.get('fav', False)
    ID = request.POST.get('ID', False)
    ID2 = request.POST.get('ID2', False)
    task_selected = request.POST.get('task_selected', False)
    task1_selection = request.POST.get('task1_selection', False)
    task2_selection = request.POST.get('task2_selection', False)
    task2_colour = request.POST.get('task2_colour', False)

    '''
    Not required now since separate systems
    age2 = request.POST.get('age2', False)
    gender2 = request.POST.get('gender2', False)
    lass2 = request.POST.get('lass2', False)
    subjects2 = request.POST.get('subjects2', False)
    fav2 = request.POST.get('fav2', False)
    '''

    resuk = Results()
    resuk.task_selected = task_selected
    resuk.time_taken = hours
    resuk.token = ID
    resuk.p_ID = ID2
    resuk.age = age
    resuk.gender = gender
    resuk.standard = lass
    resuk.subjects = subjects
    resuk.favorite = fav
    resuk.task_outcome = resu
    resuk.task1_selection = task1_selection
    resuk.task2_selection = task2_selection
    resuk.task2_colour = task2_colour
    '''
    Not required now since separate systems
    resuk.age_2 = age2
    resuk.gender_2 = gender2
    resuk.standard_2 = lass2
    resuk.subjects_2 = subjects2
    resuk.favorite_2 = fav2
    '''
    resuk.save()

    # You may want to validate data here
    try:
        # Setting output
        response = {
            'status': 1,
            'message': 'saved'
        }
    except Exception as e:
        # Something went wrong
        response = {
            'status': 0,
            'message': 'Something went wrong - ' + str(e)
        }
    # Serialise the dict: HttpResponse would otherwise iterate only the keys.
    return HttpResponse(json.dumps(response), content_type='application/json')


"""
Controls starts here
"""
# results = run(n=0)      # find puzzles with as few givens as possible.
# puzzle = best(results)  # use the best one of those puzzles.
# display(puzzle)         # display that puzzle.
Victoria Hall, Saltaire History Saltaire Institute was built by the architectural firm of Lockwood and Mawson between 1867 and 1871 for the industrialist and philanthropist Sir Titus Salt. It cost £25,000. In the original design, the building contained a main hall seating 800, a lecture room, two art rooms, a laboratory, a gymnasium, a library of 8,500 books and a reading room. For use of the building, a quarterly fee was charged. This ranged downwards from 2 shillings for adult males. Architecture Victoria Hall is a T-plan, two-storey building with a basement, constructed in ashlar, with rock-faced stone and a Welsh slate roof. Exterior To the front, the exterior has a symmetrical, eleven-bay Italianate facade, with vermiculated quoins at ground floor level and pilaster quoins to the first floor. The central bay of the building breaks forward. On top of this bay is an elaborate square tower with pyramidal ashlar roof. Each side of the tower has a modillioned segmental pediment on an enriched entablature, supported by Corinthian columns, framing slender, round-arched windows. The central portal has double, panelled doors, fanlight, and large open segmental pediment supported on large consoles. The tympanum has a cartouche bearing the Salt family coat of arms, flanked by the carved figures of Art and Science by Thomas Milnes. At basement level, the windows are square-headed, while at ground and first floor level the windows are round-arched and archivolted, the first floor windows being framed by fluted Corinthian colonnettes, and with carved head keystones and blind balustrade with turned balusters. There is a dentilled cornice between the ground and first floors. The modillioned cornice forms the base to a deep, panelled parapet decorated with rosettes and pedimented piers with grotesque winged beasts supporting iron finials. Three-bay return elevations. The main hall projects at the rear. It is seven bays long by five bays wide with tall slender round-arched windows with glazing bars and circles in heads. Interior The entrance hall has a large, stone dog-leg staircase with large square piers and vertically symmetrical turned balusters. The main hall has an elaborately plastered, coffered roof. Pilasters mark the bay divisions and support a bracketed entablature. There is a raking gallery at the rear, on fluted cast-iron columns. The former side galleries have been removed and there is glass panelling at the rear. Grounds The building is set back from Victoria Road, on which it sits. There is a gardened square outside, bounded by a dwarf wall. At the front corners, on large square bases, are 2 sculpted lions, by Thomas Milnes of London, representing War and Peace. At the rear of the wall are round section cast-iron railings with spear-head finials on a dwarf wall. Modern use At the present time, it is commonly used as a concert venue. Memorable concerts held there include the homecoming gig of local group Terrorvision after they reunited in 2007. On 29 April 2010, the BBC TV series Antiques Roadshow was filmed at Victoria Hall. The show was broadcast in two parts on 13 and 20 March 2011.
Working memory: theories, models, and controversies. I present an account of the origins and development of the multicomponent approach to working memory, making a distinction between the overall theoretical framework, which has remained relatively stable, and the attempts to build more specific models within this framework. I follow this with a brief discussion of alternative models and their relationship to the framework. I conclude with speculations on further developments and a comment on the value of attempting to apply models and theories beyond the laboratory studies on which they are typically based.
/**
 * Generates the delivery route for the drone simulation.
 *
 * @param choice user input (1 = route by distance)
 * @param dr     delivery run
 * @return the delivery run with its route set
 */
public DeliveryRun generateDeliveryRouteDrone(int choice, DeliveryRun dr) throws RuntimeException {
    boolean isDistance = (choice == 1);
    List<Address> addresses = getAddressesToDeliver(dr);
    Drone deliverer = ApplicationPOT.getInstance().getPlatform().getDroneDB().getDroneById(dr.getiDDeliverer());
    double totalWeight = calculateDeliveryWeight(dr) + deliverer.getWeight();
    DeliveryRoute deliveryRouteDrone = new DeliveryRoute(Constants.PROXIMITY_MAP_DRONE, addresses, totalWeight,
            isDistance, deliverer.getPharmacyID(), dr.getiDDeliverer());
    deliveryRouteDrone.convertAddressToVertex();
    deliveryRouteDrone.getShortestPathVisitingAllAddresses();
    dr.setDeliveryRoute(deliveryRouteDrone);
    return dr;
}
/**
 * Shared-pointer message,
 * for audio/video/data messages that need less memory copying,
 * and only for output.
 */
class SrsSharedPtrMessage : public ISrsMessage
{
private:
    typedef ISrsMessage super;
private:
    struct SrsSharedPtr
    {
        char* payload;
        int size;
        int perfer_cid;
        int shared_count;

        SrsSharedPtr();
        virtual ~SrsSharedPtr();
    };
    SrsSharedPtr* ptr;
public:
    SrsSharedPtrMessage();
    virtual ~SrsSharedPtrMessage();
public:
    virtual bool can_decode();
public:
    virtual int initialize(SrsCommonMessage* source);
    virtual int initialize(SrsMessageHeader* source, char* payload, int size);
    virtual SrsSharedPtrMessage* copy();
public:
    virtual int get_perfer_cid();
    virtual int encode_packet();
};
/*--------------------------------------------------------------------
 * Wash the min/max/default values, and leave the default set.
 */

void
MCF_InitParams(struct cli *cli)
{
    struct plist *pl;
    struct parspec *pp;
    struct vsb *vsb;

    VCLS_AddFunc(mgt_cls, MCF_AUTH, cli_params);
    vsb = VSB_new_auto();
    AN(vsb);
    VTAILQ_FOREACH(pl, &phead, list) {
        pp = pl->spec;

        if (pp->flags & NOT_IMPLEMENTED)
            continue;
        if (pp->min != NULL)
            mcf_wash_param(cli, pp, &pp->min, "minimum", vsb);
        if (pp->max != NULL)
            mcf_wash_param(cli, pp, &pp->max, "maximum", vsb);
        AN(pp->def);
        mcf_wash_param(cli, pp, &pp->def, "default", vsb);
    }
    VSB_destroy(&vsb);
}
/// Truncates the preceding siblings to the given end position,
/// and updates the current node (`self.cur`) accordingly.
pub(crate) fn truncate_siblings(&mut self, bytes: &[u8], end_byte_ix: usize) {
    let parent_ix = self.peek_up().unwrap();
    let mut next_child_ix = self[parent_ix].child;
    let mut prev_child_ix = None;

    // Drop or truncate each child based on its byte range.
    while let Some(child_ix) = next_child_ix {
        let child_end = self[child_ix].item.end;
        if child_end < end_byte_ix {
            // Preserve this node, and go to the next one.
            prev_child_ix = Some(child_ix);
            next_child_ix = self[child_ix].next;
            continue;
        } else if child_end == end_byte_ix {
            // This will be the last node.
            self[child_ix].next = None;
            // Focus on the new last child (this node).
            self.cur = Some(child_ix);
        } else if self[child_ix].item.start == end_byte_ix {
            // Check whether the previous character is a backslash.
            let is_previous_char_backslash_escape =
                end_byte_ix.checked_sub(1).map_or(false, |prev| {
                    (bytes[prev] == b'\\') && (self[child_ix].item.body == ItemBody::Text)
                });
            if is_previous_char_backslash_escape {
                // Rescue the backslash as plain text content.
                let last_byte_ix = end_byte_ix - 1;
                self[child_ix].item.start = last_byte_ix;
                self[child_ix].item.end = end_byte_ix;
                self.cur = Some(child_ix);
            } else if let Some(prev_child_ix) = prev_child_ix {
                // The node would become empty, so drop it.
                // A preceding sibling exists: make it the last one.
                self[prev_child_ix].next = None;
                self.cur = Some(prev_child_ix);
            } else {
                // No preceding siblings: remove the node from the parent.
                self[parent_ix].child = None;
                self.cur = None;
            }
        } else {
            debug_assert!(self[child_ix].item.start < end_byte_ix);
            debug_assert!(end_byte_ix < child_end);
            // Truncate the node.
            self[child_ix].item.end = end_byte_ix;
            self[child_ix].next = None;
            // Focus on the new last child.
            self.cur = Some(child_ix);
        }
        break;
    }
}
/**
 * Copyright 2014 Flipkart Internet Pvt. Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.flipkart.foxtrot.core.querystore.impl;

import com.flipkart.foxtrot.common.ActionRequest;
import com.flipkart.foxtrot.common.Table;
import com.flipkart.foxtrot.core.common.PeriodSelector;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.List;

/**
 * User: <NAME> (<EMAIL>)
 * Date: 24/03/14
 * Time: 3:46 PM
 */
public class ElasticsearchUtils {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchUtils.class.getSimpleName());

    public static final String DOCUMENT_TYPE_NAME = "document";
    public static final String DOCUMENT_META_TYPE_NAME = "metadata";
    public static final String DOCUMENT_META_FIELD_NAME = "__FOXTROT_METADATA__";
    public static final String DOCUMENT_META_ID_FIELD_NAME = String.format("%s.id", DOCUMENT_META_FIELD_NAME);
    public static String TABLENAME_PREFIX = "foxtrot";
    public static final String TABLENAME_POSTFIX = "table";
    private static final DateTimeFormatter FORMATTER = DateTimeFormat.forPattern("dd-M-yyyy");
    public static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormat.forPattern("dd-M-yyyy");

    public static void setTableNamePrefix(ElasticsearchConfig config) {
        ElasticsearchUtils.TABLENAME_PREFIX = config.getTableNamePrefix();
    }

    public static String getIndexPrefix(final String table) {
        return String.format("%s-%s-%s-", ElasticsearchUtils.TABLENAME_PREFIX, table,
                ElasticsearchUtils.TABLENAME_POSTFIX);
    }

    public static String getIndices(final String table) {
        /*long currentTime = new Date().getTime();
        String names[] = new String[30]; //TODO::USE TABLE METADATA
        for(int i = 0 ; i < 30; i++) {
            String postfix = new SimpleDateFormat("dd-M-yyyy").format(new Date(currentTime));
            names[i] = String.format("%s-%s-%s", TABLENAME_PREFIX, table, postfix);
        }*/
        return String.format("%s-%s-%s-*", ElasticsearchUtils.TABLENAME_PREFIX, table,
                ElasticsearchUtils.TABLENAME_POSTFIX);
    }

    public static String[] getIndices(final String table, final ActionRequest request) throws Exception {
        return getIndices(table, request, new PeriodSelector(request.getFilters()).analyze());
    }

    @VisibleForTesting
    public static String[] getIndices(final String table, final ActionRequest request, final Interval interval) {
        DateTime start = interval.getStart().toLocalDate().toDateTimeAtStartOfDay();
        if (start.getYear() <= 1970) {
            logger.warn("Request of type {} running on all indices", request.getClass().getSimpleName());
            return new String[]{getIndices(table)};
        }
        List<String> indices = Lists.newArrayList();
        final DateTime end = interval.getEnd().plusDays(1).toLocalDate().toDateTimeAtStartOfDay();
        while (start.getMillis() < end.getMillis()) {
            final String index = getCurrentIndex(table, start.getMillis());
            indices.add(index);
            start = start.plusDays(1);
        }
        logger.info("Request of type {} on indices: {}", request.getClass().getSimpleName(), indices);
        return indices.toArray(new String[indices.size()]);
    }

    public static String getCurrentIndex(final String table, long timestamp) {
        //TODO::THROW IF TIMESTAMP IS BEYOND TABLE META.TTL
        String datePostfix = FORMATTER.print(timestamp);
        return String.format("%s-%s-%s-%s", ElasticsearchUtils.TABLENAME_PREFIX, table,
                ElasticsearchUtils.TABLENAME_POSTFIX, datePostfix);
    }

    public static PutIndexTemplateRequest getClusterTemplateMapping(IndicesAdminClient indicesAdminClient) {
        try {
            PutIndexTemplateRequestBuilder builder =
                    new PutIndexTemplateRequestBuilder(indicesAdminClient, "generic_template");
            builder.setTemplate(String.format("%s-*", ElasticsearchUtils.TABLENAME_PREFIX));
            System.out.println(getDocumentMapping().string()); // debug output of the generated mapping
            builder.addMapping(DOCUMENT_TYPE_NAME, getDocumentMapping());
            return builder.request();
        } catch (IOException ex) {
            logger.error("TEMPLATE_CREATION_FAILED", ex);
            return null;
        }
    }

    public static XContentBuilder getDocumentMapping() throws IOException {
        return XContentFactory.jsonBuilder()
                .startObject()
                .field(DOCUMENT_TYPE_NAME)
                .startObject()
                .field("_source")
                .startObject()
                .field("enabled", false)
                .endObject()
                .field("_all")
                .startObject()
                .field("enabled", false)
                .endObject()
                .field("_timestamp")
                .startObject()
                .field("enabled", true)
                .field("store", true)
                .endObject()
                .field("dynamic_templates")
                .startArray()
                .startObject()
                .field("template_metadata_fields")
                .startObject()
                .field("path_match", ElasticsearchUtils.DOCUMENT_META_FIELD_NAME + ".*")
                .field("mapping")
                .startObject()
                .field("store", true)
                .field("doc_values", true)
                .field("index", "not_analyzed")
                .field("fielddata")
                .startObject()
                .field("format", "doc_values")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
                .startObject()
                .field("template_timestamp")
                .startObject()
                .field("match", "timestamp")
                .field("mapping")
                .startObject()
                .field("store", false)
                .field("index", "not_analyzed")
                .field("fielddata")
                .startObject()
                .field("format", "doc_values")
                .endObject()
                .field("type", "date")
                .endObject()
                .endObject()
                .endObject()
                .startObject()
                .field("template_no_store_analyzed")
                .startObject()
                .field("match", "*")
                .field("match_mapping_type", "string")
                .field("mapping")
                .startObject()
                .field("store", false)
                .field("index", "not_analyzed")
                .field("fielddata")
                .startObject()
                .field("format", "doc_values")
                .endObject()
                .field("fields")
                .startObject()
                .field("analyzed")
                .startObject()
                .field("store", false)
                .field("type", "string")
                .field("index", "analyzed")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
                .endObject()
                .startObject()
                .field("template_no_store")
                .startObject()
                .field("match_mapping_type", "date|boolean|double|long|integer")
                .field("match_pattern", "regex")
                .field("path_match", ".*")
                .field("mapping")
                .startObject()
                .field("store", false)
                .field("index", "not_analyzed")
                .field("fielddata")
                .startObject()
                .field("format", "doc_values")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
                .endArray()
                .endObject()
                .endObject();
    }

    public static void initializeMappings(Client client) {
        PutIndexTemplateRequest templateRequest = getClusterTemplateMapping(client.admin().indices());
        client.admin().indices().putTemplate(templateRequest).actionGet();
    }

    public static String getValidTableName(String table) {
        if (table == null) return null;
        return table.trim().toLowerCase();
    }

    public static boolean isIndexValidForTable(String index, String table) {
        String indexPrefix = getIndexPrefix(table);
        return index.startsWith(indexPrefix);
    }

    public static boolean isIndexEligibleForDeletion(String index, Table table) {
        if (index == null || table == null || !isIndexValidForTable(index, table.getName())) {
            return false;
        }
        String indexPrefix = getIndexPrefix(table.getName());
        String creationDateString = index.substring(index.indexOf(indexPrefix) + indexPrefix.length());
        DateTime creationDate = DATE_TIME_FORMATTER.parseDateTime(creationDateString);
        DateTime startTime = new DateTime(0L);
        DateTime endTime = new DateTime().minusDays(table.getTtl()).toDateMidnight().toDateTime();
        return creationDate.isAfter(startTime) && creationDate.isBefore(endTime);
    }

    public static String getTableNameFromIndex(String currentIndex) {
        if (currentIndex.contains(TABLENAME_PREFIX) && currentIndex.contains(TABLENAME_POSTFIX)) {
            String tempIndex = currentIndex.substring(
                    currentIndex.indexOf(TABLENAME_PREFIX) + TABLENAME_PREFIX.length() + 1);
            int position = tempIndex.lastIndexOf(String.format("-%s", TABLENAME_POSTFIX));
            return tempIndex.substring(0, position);
        } else {
            return null;
        }
    }

    public static String getAllIndicesPattern() {
        return String.format("%s-*-%s-*", ElasticsearchUtils.TABLENAME_PREFIX, ElasticsearchUtils.TABLENAME_POSTFIX);
    }
}
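The daily index layout used above is easiest to see with concrete values. Below is a minimal sketch, in Python for illustration, that mirrors getIndexPrefix/getCurrentIndex and the per-day fan-out in getIndices; the helper names are mine, and only the "prefix-table-postfix-dd-M-yyyy" naming scheme comes from the Java source.

from datetime import date, timedelta

TABLENAME_PREFIX = "foxtrot"
TABLENAME_POSTFIX = "table"

def index_prefix(table):
    # Mirrors getIndexPrefix: "foxtrot-<table>-table-"
    return "%s-%s-%s-" % (TABLENAME_PREFIX, table, TABLENAME_POSTFIX)

def current_index(table, day):
    # Mirrors getCurrentIndex; Joda's "dd-M-yyyy" zero-pads the day but not the month.
    return index_prefix(table) + "%02d-%d-%d" % (day.day, day.month, day.year)

def indices_for_range(table, start, end):
    # Mirrors the loop in getIndices(table, request, interval):
    # one index per day, inclusive of both endpoints.
    return [current_index(table, start + timedelta(days=i))
            for i in range((end - start).days + 1)]

print(indices_for_range("events", date(2014, 3, 24), date(2014, 3, 26)))
# ['foxtrot-events-table-24-3-2014', 'foxtrot-events-table-25-3-2014',
#  'foxtrot-events-table-26-3-2014']

A query whose time filter spans N days therefore fans out to N daily indices, while an unbounded time range (start year at or before 1970) falls back to the wildcard pattern from getIndices(table).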
Effects of chemical and botanical insecticides used for locust control on Metarhizium anisopliae var. acridum conidia after short- to medium-term storage at 30°C Abstract The short- to medium-term viability and growth of Metarhizium anisopliae var. acridum conidia were investigated when combined with six insecticides, at three different concentrations. All of the insecticides used in this study were suitable for immediate spraying with M. anisopliae var. acridum conidia except for fenitrothion. Fipronil, teflubenzuron, and fenitrothion formulations significantly reduced conidial viability over time. The 10% teflubenzuron treatment caused loss of viability relatively quickly with 9.9% germination after 28 days. Mycelial growth was affected by all the treatments except fenitrothion.
// core/src/listener/gui_server.rs
use serde_json::json;

use crate::controller::Listener;
use crate::model::*;
use crate::util::ws_server::{create_ws_server, SendRecv};

pub struct GuiServer {
    server: SendRecv,
}

impl GuiServer {
    pub fn new(port: u32) -> Self {
        Self {
            server: create_ws_server(port),
        }
    }
}

impl Listener for GuiServer {
    fn notify_event(&mut self, stg: &Stage, _event: &Event) {
        if let Some((s, r)) = self.server.lock().unwrap().as_ref() {
            // Print every message received so far.
            while let Ok(msg) = r.try_recv() {
                println!("[WS] message: {}", msg);
            }

            // Encode the stage state as JSON and send it.
            let value = json!({
                "type": "stage",
                "data": stg,
            });
            s.send(value.to_string()).ok();
        }
    }
}
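Since GuiServer only ever pushes frames shaped like {"type": "stage", "data": ...} and logs whatever text it receives, a client is straightforward. The following is a hypothetical Python client sketch; the port value and the third-party websockets package are assumptions, not part of the Rust source:

import asyncio
import json

import websockets  # third-party: pip install websockets

async def watch(port=52001):  # assumed port; GuiServer::new(port) decides the real one
    async with websockets.connect("ws://localhost:%d" % port) as ws:
        # Anything we send shows up in the server log as "[WS] message: ...".
        await ws.send("hello from gui")
        async for raw in ws:
            msg = json.loads(raw)
            if msg.get("type") == "stage":
                print("stage update received")

asyncio.run(watch())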
import { Injectable, Module } from "@nestjs/common";
import { expect } from "chai";
import { register } from "prom-client";
import { PrometheusModule, PrometheusOptionsFactory } from "../src";
import {
  Agent,
  App,
  createAsyncPrometheusModule,
  createPrometheusModule,
} from "./utils";

describe("PrometheusModule", function() {
  let agent: Agent;
  let app: App;

  afterEach(async function() {
    if (app) {
      register.clear();
      await app.close();
    }
  });

  describe("#forRoot", function() {
    describe("with all defaults", function() {
      beforeEach(async function() {
        ({ agent, app } = await createPrometheusModule());
      });

      it("registers a /metrics endpoint", async function() {
        const response = await agent.get("/metrics");
        expect(response)
          .to.have.property("status")
          .to.eql(200);
      });

      it("collects default metrics", async function() {
        const response = await agent.get("/metrics");
        expect(response)
          .to.have.property("text")
          .to.contain("process_cpu_user_seconds_total");
      });
    });

    describe("when overriding the default path", function() {
      beforeEach(async function() {
        ({ agent, app } = await createPrometheusModule({
          path: "/my-custom-endpoint",
        }));
      });

      it("does not register the default endpoint", async function() {
        const response = await agent.get("/metrics");
        expect(response)
          .to.have.property("status")
          .to.eql(404);
      });

      it("registers the custom endpoint", async function() {
        const response = await agent.get("/my-custom-endpoint");
        expect(response)
          .to.have.property("status")
          .to.eql(200);
      });

      it("collects default metrics", async function() {
        const response = await agent.get("/my-custom-endpoint");
        expect(response)
          .to.have.property("text")
          .to.contain("process_cpu_user_seconds_total");
      });
    });
  });

  describe("#forRootAsync", function() {
    @Injectable()
    class OptionsService implements PrometheusOptionsFactory {
      createPrometheusOptions() {
        return {};
      }
    }

    @Module({
      providers: [OptionsService],
      exports: [OptionsService],
    })
    class OptionsModule {}

    describe("useExisting", function() {
      beforeEach(async function() {
        ({ agent, app } = await createAsyncPrometheusModule({
          imports: [OptionsModule],
          useExisting: OptionsService,
          inject: [OptionsService],
        }));
      });

      it("registers a /metrics endpoint", async function() {
        const response = await agent.get("/metrics");
        expect(response)
          .to.have.property("status")
          .to.eql(200);
      });

      it("collects default metrics", async function() {
        const response = await agent.get("/metrics");
        expect(response)
          .to.have.property("text")
          .to.contain("process_cpu_user_seconds_total");
      });
    });

    describe("useClass", function() {
      beforeEach(async function() {
        ({ agent, app } = await createAsyncPrometheusModule({
          useClass: OptionsService,
          inject: [OptionsService],
        }));
      });

      it("registers a /metrics endpoint", async function() {
        const response = await agent.get("/metrics");
        expect(response)
          .to.have.property("status")
          .to.eql(200);
      });

      it("collects default metrics", async function() {
        const response = await agent.get("/metrics");
        expect(response)
          .to.have.property("text")
          .to.contain("process_cpu_user_seconds_total");
      });
    });
  });

  // This block exercises createAsyncProviders; the original file labeled it
  // "#createAsyncOptionsProvider" as well, which looked like a copy-paste slip.
  describe("#createAsyncProviders", function() {
    it("throws an error if useClass or useExisting are not provided", function() {
      expect(() => {
        PrometheusModule.createAsyncProviders({});
      }).to.throw("Invalid configuration. Must provide useClass or useExisting");
    });
  });

  describe("#createAsyncOptionsProvider", function() {
    it("throws an error if useClass or useExisting are not provided", function() {
      expect(() => {
        PrometheusModule.createAsyncOptionsProvider({});
      }).to.throw("Invalid configuration. Must provide useClass or useExisting");
    });
  });
});
Obesity Prevention from Conception: A Workshop to Guide the Development of a Pan-Canadian Trial Targeting the Gestational Period This report summarizes a meeting, Obesity Prevention from Conception, held in Ottawa in 2012. This planning workshop was funded by the Canadian Institutes of Health Research (CIHR) to bring together researchers with expertise in the area of maternal obesity (OB) and weight gain in pregnancy and pregnancy-related disease to attend a one-day workshop and symposium to discuss the development of a cross-Canada lifestyle intervention trial for targeting pregnant women. This future intervention will aim to reduce downstream OB in children through encouraging appropriate weight gain during the mothers pregnancy. The workshop served to (i) inform the development of a lifestyle intervention for women with a high pre-pregnancy body mass index (BMI), (ii) identify site investigators across Canada, and (iii) guide the development of a grant proposal focusing on the health of mom and baby. A brief summary of the presentations as well as the focus groups is presented for use in planning future research. NutritioN aNd Metabolic iNsights 2014:7 well as strategies specific to individual sites that would facilitate successful implementation and execution of the randomized control trial (RCT) (iii) to collaborate with our knowledge users and policy makers as well as stakeholders to promote the trial at the outset and then continue to work with our partner networks and resources to widely disseminate the future trial findings and improve public health outcomes. Overall, the purpose of this manuscript is to highlight novel and relevant research in the area by summarizing the presentations given on key prenatal lifestyle interventions focusing on physical activity (PA) and nutrition, and outlining the essential elements for implementing a successful trial (eg trial design, recruitment, execution, follow-up, etc.) that were drawn from the workshop. Introduction Obesity (OB) has become a major barrier to the health of people world-wide and is a known risk factor for type 2 diabetes, gestational diabetes mellitus (GDM), ischemic heart disease, hypertension, and several types of common cancers. Maternal pre-pregnancy body mass index (BMI) and gestational weight gain (GWG), which directly or indirectly modify the intrauterine environment, are known to be important predictors of fetal growth 1 and development trajectories as well as contributors to childhood OB. Infant birth weight is highest in pregnancy complicated by overweight (OW) and OB. Similarly, excessive GWG, independent of pregravid BMI, significantly increases the odds of OW in pre-school, adolescence, and adulthood making fetal development a critical period for downstream OB prevention. Furthermore, greater GWG is associated with greater post-partum weight retention (PPWR) and subsequent risk of post-partum OB, which has been linked to a wide range of downstream health conditions for both mothers and their offspring. 9, Maternal BMI and GWG are among the most important issues related to the short-and long-term risks for pediatric and maternal OB. Thus, interventions targeted at decreasing maternal pregravid BMI and preventing excess GWG with respect to recently published guidelines from the 2009 Institute of Medicine (IOM) have the potential to significantly impact public health. 
In May 2012, a Canadian Institutes of Health Research (CIHR)-funded planning workshop was held that brought together a set of Canadian maternal, fetal, and pediatric health professionals as well as experts in lifestyle intervention, and the relevant knowledge users. Participants, including international speakers from Australia and Brazil, shared expertise and their work in the area of lifestyle interventions aimed to improve maternal-fetal health. The objectives of the workshop were multidimensional and aimed at enhancing national research collaborations prioritizing the development and implementation of a pan-Canadian trial focusing on GWG management to attenuate downstream OB in offspring by promoting optimal growth trajectories. Knowledge synthesis and exchange was accomplished through presentations of the research projects from attendees as well as breakout sessions where subgroups were set up so that those in attendance could discuss and share their expertise, experiences, setbacks, and successes relating back to the pan-Canadian trial design. The goal of the workshop was to identify the key characteristics and required elements for an effective intervention protocol to minimize excessive GWG and prevent subsequent maternal and childhood OB. Further objectives included: (i) identifying trial sites, facilitators, and teams of investigators to participate in the development of a pan-Canadian trial targeting the gestational period for submission to peer-reviewed funding agencies and (ii) determining a set of recruitment strategies and study logistics that could be implemented across Canada as NutritioN aNd Metabolic iNsights 2014:7 The sample size required detecting a clinically relevant difference of 0.4 in offspring BMI z-score at two years of age with 80% power, a significance level of 5%, and a very conservative standard deviation of 1.4 is 386 women (193 intervention vs. 193 control). However, our MOM trial feasibility pilot aimed to recruit 60 women to inform future power calculations as well as better understand and optimize study logistics including coordination. Taking into account drop-out and loss to follow-up, the MOM trial currently has 50 active participants, 32 intervention MOMs, and 16 controls. All active participants have given birth and are in the follow-up phase with their child. Nine mother-baby pairs have completed the final two-year follow-up visit. In addition to the study protocol, 34 we published preliminary data on the women's attitudes toward GWG as part of a book chapter. 35 Data on the intervention compliance and pregnancy outcomes of the study will be published in 2014. The main limitations noted thus far were the lack of flexibility and generic program delivery as obstacles to participant adherence and compliance. Mothers struggled to make it to regularly scheduled, time-specific PA classes and nutrition workshops, and they often asked for a home program that better suited their lifestyle and needs. Adherence was particularly troublesome for pregnant women with other children. Limiting weight gain in OW and OB women during pregnancy to improve health outcomes (LIMIT): a randomized trial-Professor Jodie M. Dodd, Ms Andrea R. Deussen, on behalf of the LIMIT investigative team, Adelaide, Australia. The Australian LIMIT trial was designed to assess whether the combination of dietary and lifestyle (ie PA) advice provided to OW and OB pregnant women to restrict weight gain during pregnancy is effective in improving maternal, fetal, and infant health outcomes. 
The primary hypothesis of the trial was that dietary and Therefore, the primary aim of the MOM trial pilot RCT, a study part of the CIHR SOMET (Sherbrooke, Ottawa, Montreal, Emerging Team) grant, is to determine whether a structured prenatal PA and nutrition intervention provided to pregnant women during their second and third trimesters reduces offspring BMI z-score at two years of age. Furthermore, we aimed to assess the feasibility of implementing this intervention and its potential effects on pregnant women meeting the updated IOM GWG guidelines, infant birth weight, macrosomia; small-for gestational age (SGA), appropriate-for gestational age (AGA), large-for gestational age (LGA), PPWR, and body composition at one year. At the time of trial entry, women were stratified according to their BMI (NW, OW, or OB) and parity-a detailed protocol for the MOM trial has been previously described. 34 When designing the MOM trial, it was hypothesized that a greater number of offspring born to women in the intervention group will follow a healthier growth trajectory and thus fewer would be categorized as OW/OB at two years of age. Further, secondary hypotheses were that a larger proportion of participants randomized to the intervention group would meet the IOM-recommended GWG guidelines and that they would give birth to fewer macrosomic offspring while experiencing less PPWR than those in the control group receiving standard obstetrical care. As both pre-pregnancy BMI and excessive weight gain in all women can contribute to childhood OB, women aged 18 years and more with a BMI above 18.5 were included. Women carrying a single fetus were recruited before 20 weeks gestation through ultrasound clinics, hospitals, obstetricians, midwives, and flyers. Women were randomized into the control or intervention group. Figure 1 outlines the measurements completed on mother and child. the intervention women. Secondary study outcomes included a variety of maternal and infant clinical health outcomes, maternal quality of life and well-being, and health care costs. Additional measures to inform mechanistic studies designed to explore potential molecular links between maternal OB and GWG and neonatal growth were also collected and are outlined in Figure 2. The estimated sample size for this trial was 2,180 women and was powered to detect a 30% reduction in the risk of infants born LGA from 14.4 to 10.1%, allowing for 15% attrition. Recruitment of women took place at three metropolitan hospitals representing 10,600 births per annum (approximately 400 per week) across 26 obstetric booking clinics each week. At the majority of clinics, women were approached and screened by research assistants. Recruitment for the trial commenced in May 2008 and was completed in February 2012, and the last infant was born on July 2012. Approximately 150 eligible women were identified each month of which approximately 40% consented to participate. Follow-up of women and their infants at 6 and 18 months post-partum is currently underway. Approximately 45-50 women were randomized each month to either the treatment or control group. Women in the treatment group received written information about healthy diet and exercise lifestyle intervention provided to OW and OB women during pregnancy would reduce the risk of infants born LGA (defined as birth weight above the 90th percentile for gestational age and sex). 
In this multi-center RCT, women were eligible for inclusion if they were giving birth to a live, singleton infant; were recruited between 10 and 20 weeks gestation; and were OW or OB. At the time of trial entry, women were stratified according to their BMI (OW vs. OB), parity (nulliparous vs. multiparous), and the center where they planned to give birth. Women randomized to the dietary and lifestyle intervention group received a comprehensive intervention aiming to limit GWG in pregnancy that included a combination of dietary, exercise, and behavioral strategies, delivered by trained research dietitians and research assistants. The intervention group was divided into three sub-categories: group versus individual dietary and exercise counseling, group versus individual-based exercises, and provision of informational DVD or no DVD. The dietary advice provided was consistent with current Australian dietary standards, whereas PA advice was based on the guidelines of Royal College of Obstetricians and Gynaecologists (RCOG, UK). Booklets of dietary guidelines; exercise guidelines; for recording diet and exercise goals and achievements; and recipe were provided to the intervention group. An informational DVD was given to half of (N = 131). Although birth weight and total percentage of body fat were not different between the NELIP and the reference groups, the NELIP babies had higher triceps (0.84 ± 0.2 cm vs. 0.68 ± 0.2 cm) and anterior thigh skinfolds (1.2 ± 0.3 cm vs. 0.91 ± 0.3 cm) compared to the reference group, respectively. Following the original NELIP evaluation, we examined the weight history of OW and OB women (N = 107) before pregnancy. 38 In this sample, 67% had unstable body weights before pregnancy, and 88% of the multiparous women had post-partum weight retention on an average of 13.0 ± 9.4 kg. A total of 41% had an OW maternal grandmother and 54% had an OW mother, and the average BMI of the baby's father was 27.7 ± 5.3 kg, indicating that many of these women had a family history of OB and usually partnered with OW men. This indicated that it was important to include a family-based component with a behavior specialist using a two pronged approach within the NELIP intervention at 16-20 weeks gestation and then re-initiate at two months post-delivery until 12 months. Subsequently, we developed a family-based behavioral treatment (FBBT) program to be delivered with NELIP during pregnancy with a re-introduction of the program at two months post-partum (Clinicaltrials.gov (NCT01129505)) with a stroller walking program. We also added a behavioral component in a group session format that included sessions during pregnancy including Phase 1 (weeks 1 and 2 of the program): Motivation and Initiation-"Healthy Me, Healthy Baby," Phase 2 (weeks 3-6): Adoption-"A Time for Change," and Phase 3 (weeks 7-18): Maintenance-"Sticking with Things." We have just completed this study with one year follow-up and are currently examining the difference between the NELIP with no post-partum intervention (n = 90) compared to the family-based NELIP (NELIP and FBBT) reintroduced at two months post-delivery (n = 52), and we are following the maternal-infant pairs to one year post-partum to assess chronic disease risk of the mothers and the body composition of the babies. We were concerned with the observation that many of our women gained excessive weight before the intervention began at 16-20 weeks gestation. 
We have recently published a paper in pregnancy, attended two appointments with a dietitian, and were contacted by telephone on three occasions to review diet and exercise changes and goals. Women in the control group received the current standard antenatal care in Australia with limited dietary and exercise information. The main barriers to recruitment included time constraints at the first prenatal visit, sensitivities regarding weight-related discussions, protectionism of the staff, and resource-intensive nature of the study. On the other hand, some recruitment procedures that aided in patient enrollment (ie "recruitment enablers") included ensuring research staff members were sensitive and actively listened to women telling their story about the current pregnancy, and ensuring women were informed of the screening process that involved calculation and classification by BMI. Finally, it was important to develop a positive and professional relationship between the research assistants and clinic staff (particularly, research staff respecting and prioritizing the core work of the clinic) that ensured sustained enrollment. Overall, the LIMIT trial was successful at designing and implementing a multi-centered RCT. A great strength of this trial, which is an important factor to take into account for a pan-Canadian trial, was the integration of study staff at the clinics. This allowed effective recruitment and meeting the large sample size goal. The Nutrition and Exercise Lifestyle Intervention Program (NELIP) experience in London, Canada-Michelle F. Mottola, PhD, FACSM, on behalf of the NELIP team, London, ON, Canada. The NELIP investigates the effects of nutrition and exercise on OW and OB pregnant women. It aims to prevent excessive weight gain and the development of GDM. This program was evaluated with a single-arm intervention study using historical controls matched by prepregnancy BMI, age, and parity at a ratio of four controls to one intervention participant. All women were medically prescreened, and 90 women entered the study at 16-20 weeks gestation. Figure 3 outlines the study assessment timeline. They followed NELIP until delivery and completed 24-hour food records weekly. The nutrition intervention was a modified GDM diet with a daily target of 2000 kcal, 200 g carbohydrate (40-55% of total energy), emphasizing small frequent meals and providing education on serving size. The exercise program was individualized based on a peak fitness test (30% of peak heart rate (HR) reserve). Participants were required to walk three to four times per week with an average target HR of approximately 118 beats per minute (bpm). They were also required to wear a pedometer and keep an exercise log. The preliminary results found in Table 1 were published in 2010. 36,37 There was also a decrease in the number of babies born with a birth weight between 4.0 and 4.5 kg in the OW NELIP women (3.2%) compared to the matched cohort (18%). At delivery, within 6-18 hours after birth, girth and skinfold measurements were taken for the NELIP babies and compared to a reference group of NW women Experience from the community-based dietary and exercise trials in Manitoba-Garry Shen, MD/PhD, on behalf of the IDEA (impact of diet and exercise activity on pregnancy outcome) trial team, Winnipeg, MB, Canada. The IDEA study group is led by Dr Garry Shen at the University of Manitoba. The group has conducted a series of epidemiological studies on GDM in Manitoba. 
Their studies analyzed more than 9,000 women with GDM and 300,000 births between 1985 and 2004 across the province. It was discovered that the prevalence of GDM in pregnant First Nations (FN) women was two to three times higher than in non-FN women in urban or rural areas. 40 The findings demonstrated the tendency toward GDM and other unfavorable pregnancy and/or fetal outcomes (delivery of macrosomic infants and increased risk of dystocia) in Manitoba and the greater risk for FN women, particularly those living in rural/remote regions of Manitoba. Following these findings, the IDEA group developed a community-based lifestyle intervention program for pregnant women. 41 The program has been evaluated through a randomized controlled trial for urban-living pregnant women who were recruited within 20 weeks of pregnancy. Participants with known diabetes, and medical and obstetric contraindications to exercise or multiple gestations were excluded. All participants consented and were randomized into the control or intervention groups. The primary end point of the study is excessive weight gain according to 2009 IOM guidelines. Secondary outcomes include GDM, LGA infants, maternal PA, and food intake. The exercise program for pregnant women in the intervention group included weekly group sessions and home exercise three to five times per week. Our team developed an instructive exercise video suitable for pregnant women to standardize group exercise and to assist home examining 172 pregnant women: 33.7% were NW, 33.7% were OW, and 32.6% were OB 39 who participated in the NELIP program. We examined weight gain before the intervention at 16-20 weeks, and weight gain on the NELIP intervention to delivery. To examine the timing of excessive GWG compared to newborn body fatness at birth, we div ided the group of women into appropriate GWG (ie within IOM recommendations) in the first and second halves of pregnancy (overall appropriate), appropriate GWG in the first half of pregnancy and excessive GWG in the second half of pregnancy (late excessive), excessive GWG in the first half of pregnancy and appropriate GWG in the second half of pregnancy (early excessive), and excessive GWG throughout pregnancy (overall excessive) based on their pre-pregnancy BMI category. We then compared these groups to infant birth weight and infant adiposity. The results indicated that the timing of excessive maternal weight gain, specifically during the first half of pregnancy, is a stronger predictor of infant body fatness at birth than total maternal weight gain regardless of pre-pregnancy BMI. Thus, the timing of excessive maternal GWG is important, and perhaps interventions aimed at prevention should occur earlier than 16 weeks, even in the pre-conception period, which may suggest an important role of the early maternal in utero environment on the fetal programming of OB. Although the use of a historically matched control is a limitation of these studies, this design allowed for successful completion of the study given that retention of control participants is a significant issue with RCTs. The NELIP trial did successfully recruit participants, had low attrition rates, and had results that showed promise toward the direction the group hypothesized. An important finding of these studies was that prevention of excessive GWG should be initiated as early as possible. accurate as previous research has demonstrated. 
45 Overall, the messages learned from this study regarding planning and implementation align with the previous prenatal interventions discussed. The effect of an antenatal physical exercise program on maternal/perinatal outcomes and quality of life in OW and OB pregnant women: a randomized clinical trial-Simony Lira do Nascimento, So Paulo, Brazil. In this intervention study, OW and OB pregnant women were randomly assigned to either an intervention group that received prenatal nutrition information and an exercise protocol including home exercise counseling or a control group that had prenatal and nutrition information only. The primary outcome in the study was a reduction in excessive weight gain during pregnancy, and the secondary outcomes were maternal weight gain, blood pressure, quality of life, and fetal outcomes (newborn weight, Apgar scores, prematurity rate, and adequacy). Participants were OW pre-pregnancy, 18 years old, and were recruited at 14-24 weeks gestation. Women with absolute contraindications to exercise during pregnancy 46 were excluded. The data collection took place, according to that outlined in Figure 4, from August 2008 to 2010. The intervention program counseling covered IOM GWG recommendations specific to their BMI category, 37 the importance and effects of PA during pregnancy, the optimal amount and intensity of home exercise, general healthy nutrition counseling and appropriate caloric intake for exercising, suitable clothing to wear when exercising, signs and symptoms to look out for during exercise, and when to cease PA. The exercise program was designed to enable pregnant women to increase their level of PA and to improve their quality of life through simple exercises that could be performed without supervision and that did not present a risk to the mother or the fetus. The program was composed of two components: an exercise protocol once a week supervised by a trained physiotherapist and a home exercise protocol or walking five days per week, recorded in an exercise journal/diary. The exercise protocol included 22 exercises with intensity varying from light to moderate for a total of a 40-minute session (10 minutes stretching, 20 minutes muscle training, and 10 minutes relaxation) such that the HR must be kept below 140 beats per minute. 46 Before and after exercise class, their blood pressure was measured and they were weighed. A total of 40 women were randomized to the intervention and 42 to the control group, and 39 and 41 completed the follow-up, respectively. The majority had less than a high school education, were without remuneration or unemployed, and had more than one child. Prevalence of hypertension and/ or diabetes was high in this population. At baseline, most women were at approximately 17 weeks gestation. According to the IOM's weight gain guidelines, 48% of the intervention participants compared to 57% of the controls gained excessive weight (P = 0.43). There was a high rate of cesarean-section exercise. A professional trainer and local pregnant women with different cultural backgrounds were invited to be cast members of this video. The video includes walking, and aerobic and strength exercises. Dietary interview and counseling were provided to participants in the intervention group. An example of dietary interview record with assistance of Food Choice Map software is available. 42 Dietitian and interviewee collaboratively placed magnetic stickers with images of foods on the map that represent the frequency and quantity of food intake. 
The information was scanned into a computer after the interview, and the data were analyzed instantly. Dietitians provided individual counseling to participants based on the data during the session. At the time of analysis in 2010, 190 participants had completed the program, 88 in the control group and 102 in the intervention group. No significant differences were detected in age, pregravid BMI, proportions of FN, or family income between the two groups. More than 25% of participants had FN status, and more than 70% the participants were from families with low or below average incomes. PA at baseline was comparable between the two groups. Two months after the implementation of the intervention, participants in the intervention group had significantly higher PA than the control group. Similarly, there was no significant difference in total caloric intake between the two groups at baseline; however, two months after enrollment, the intervention group had a significantly lower total caloric intake than the control group as well as a significantly lower intake in total fat, saturated fat, and cholesterol. The intervention group gained 7% less weight during pregnancy than controls, but the difference was not statistically significant. Birth weight and the rates of LGA, GDM, and cesarean-section were not significantly different between the two groups. Excessive GWG in the intervention group was significantly lower than that in the control group according to the 2009 IOM guidelines. 43 The findings suggest that community-based exercise and individualized dietary counseling during pregnancy may increase PA, improve dietary habits, and reduce excessive GWG in urban-living pregnant women. As FN women in rural regions have higher risks for GDM, OB, and type 2 diabetes, the IDEA study group has undertaken nonrandomized studies to promote healthier lifestyle to improve the health of FN women and children in rural/remote FN communities in Manitoba. 44 Overall, from the IDEA trial we learned that recruitment of participants may be improved by stronger advertisement and by training family doctors and midwives in the city on the principles outlined in the study. An understanding that randomization may affect recruitment to a certain degree is important when planning a large-scale study. Maintaining adherence is a challenge for the study while close followup and peer support may help to improve the adherence of participants to the study. Finally, it is important to note that PA assessments through self-reports may not be sufficiently Randomization to the control group leads to an increased likelihood of participant drop-out; these women enroll because they are ready for a lifestyle change and they seek the support provided in the intervention aspect of the study. It is agreed that an incentive of some sort should be provided to the control participants to increase the likelihood that they continue participating in the trial. However, there are institutional differences in research ethics board acceptance of this practice. Adherence to exercise programs in general is suboptimal, and thus compliance to the intervention itself can be a challenge when the sessions are at scheduled times. In addition, childcare can be an obstacle limiting participation in exercise in weekday group classes for women with other children. Many health care providers find it difficult to approach OW and OB pregnant women, and some of these women may be resistant to weight-related dialog because of pre-existing weight issues. 
Pregnancy may exacerbate this sensitivity, making recruitment and adherence to intervention programs challenging. The best approach to recruitment seems to be the integration of research staff into clinics and building relationships with the staff as was done in the Australian LIMIT trial. This approach appeared to have alleviated part of the recruitment issues faced in the MOM trial and Brazilian trial. Moreover, the timing of the intervention is crucial. It appears that intervening before 16 weeks may be necessary to have a significant impact on GWG. Finally, PA self-reporting is notoriously inaccurate and accelerometers are expensive to purchase and replace, thus identifying the need for an affordable and reliable tool that quantifies PA. and LGA newborns in both groups. There was no effect on birth weight, gestational age at delivery, or Apgar score. The data suggest that women may benefit from exercise during pregnancy without compromising the health of the baby. We found a significantly lower total weight gain, weight gain in program, and weekly weight gain in the intervention OW women compared to the control OW women (P  0.038). There were no significant differences on GWG for the OB women. As has been expected, certain aspects of quality of life were low at the end of pregnancy, such as physical and social categories, without statistically significant differences between the groups. Limitations included a low adherence/compliance mainly in relation to home exercise counseling. 62% recorded their exercise routine in a diary with a mean of 12.3 weeks of home exercise and the average minutes of weekly exercise with 57 ± 22.2 exercise protocol minutes per week and 79.8 ± 46.9 walking minutes per week while the recommendation was 150 minutes. Another difficulty was in determining and ensuring the optimal type, frequency, intensity, and duration of exercise at home. This study was also limited by the small sample size, social conditions, and barriers to participation, such as cultural perception of having to rest during pregnancy, cost of transportation, proper clothing to wear while exercising, and childcare. The important lessons learned from these national and international projects to be applied to a future pan-Canadian trial include addressing (i) high attrition, (ii) loss to followup, (iii) lifestyle intervention fidelity, (iv) obstacles to recruitment and adherence, and (v) burden to the participant. safety; yet walking groups in a safe place (mall, arena, school, etc.) may circumvent this issue. If walking is the exercise of choice, an effective way to monitor it must be selected above and beyond self-reported log books. Options include pedometers (everyday use) or accelerometers (more comprehensive time stamped, capturing omni-directional movement, ability to provide data on intensity), which allow for goal setting and monitoring of progress. A study minimum of a 10,000 steps/ day (evidence-based recommendation for health chronic disease prevention) 47 could be set and increased once or several times throughout the pregnancy depending on progress. As for the timing of the PA intervention, there could be multiple arms, intervening during early and later stages of pregnancy as well as post-partum. The type of exercise chosen will ultimately depend on the main outcome of the study (eg GWG vs. general health improvement). Group 3: Designing an effective nutrition intervention during pregnancy. 
The first consideration in designing the nutrition intervention is to decide on a valid and reliable method to quantify baseline eating behavior of study participants. For example, before study initiation a tool must be selected to identify a nutrition plan for the women and may include a sevenday food record, diet prescription based on resting energy expenditure (REE), a food frequency questionnaire, or comprehensive interview conducted by a dietitian. Furthermore, a decision on whether personal circumstances would be taken into account is required and if so how to best manage these considerations. For example, the number of dependent children at home, type of employment, cooking skills, nutrition knowledge, and income are important factors that can enable or prevent the success of the intervention. A "knowledge of nutrition" screening test could be completed at baseline; this could build in cultural components and SES/food insecurity questions. Once this is established, a daily caloric goal and plan will be decided upon. For the nutrition plan, it can be either a general advice or modifications to current dietary habits and the study-specific interventionist will work with the participant to increase adherence. Furthermore, although there are no set guidelines on extra calories, current literature points toward a small requirement for additional calories during the second and last trimesters and Health Canada endorses the IOM recommendation 48 that an additional 1420 kJ (340 kcal) and 1880 kJ (450 kcal) is sufficient to support healthy weight gain during the second and third trimesters of pregnancy, respectively. However, it is important to identify baseline eating behaviors as many women are already consuming calories in excess regardless of pregnancy status. Further, assessment and monitoring of energy intake is important and there are many potential ways this can be accomplished. Consensus from the meeting indicated that this may be done via hand-written logs, online diet records, or 24-hour food recalls. Additional information may be provided through multiple avenues including a pregnancy handbook, webinars, podcasts, or postcards send via mail. Finally, ongoing support may be provided through Summary of Breakout Sessions Following the presentations and overview of current prenatal interventions, attendees were divided into groups based on their expertise for the afternoon portion of the meeting to discuss various requirements in planning the pan-Canadian trial. Below is a description of the key consideration points concluded from each breakout session. Group 1: Methodological considerations when implementing a multi-site RCT. Among other considerations, the direction of the pan-Canadian pregnancy weight management trial requires the decision of the design (cluster vs. non-cluster) and the primary outcome. The options discussed included child adiposity or BMI outcome downstream, macrosomia (birthweight  90th percentile), meeting the IOM GWG guidelines, and infant birth weight. Potential secondary outcomes of interest are as follows: rate of weight gain, GWG, LGA, PPWR, adverse prenatal/ antenatal outcomes, barriers to adherence, self-efficacy, and quality of life. For the sample size, a realistic target from a costing and logistics perspective would be approximately 1,000 participants; however, there are certain caveats to consider. 
First if a downstream child adiposity outcome is chosen, there is considerable evidence to suggest BMI z-score at two years would be an excellent marker, but loss to follow-up is high with such a distant primary outcome. If macrosomia rate or infant birth weight is selected, the target sample size of approximately 1,000 is not high enough to detect a reduction in these outcomes. Thus, a shorter term child-body composition outcome should be considered. It is customary that multi-center trials have one central coordination site and with the help of health informatics, the management of data would be centralized and electronic to facilitate data sharing. The RCT design could range from two to four arms with options for each arm including diet, exercise, combination (diet and exercise), and control. Furthermore, to simplify the randomization of multiple centers, it was decided that clusterrandomization might be logistically simplest and applied by practice and stratified by BMI category (ie OW/OB/NW). Group 2: Designing an effective PA intervention. For the intervention aspect of the study, there was a debate over personalized plans versus standardized "one-size fits all" programs and how to best assess/monitor the effectiveness of these programs' ability to change PA behaviors. There was a consensus that an initial in-person visit for safety advice and guidance is necessary. Follow-up may be provided by telephone counseling, internet counseling, or smart phone application. All women would be monitored; however, only those who are falling below PA guidelines or above sedentary guidelines would be prompted with active strategies to encourage guideline attainment. For exercise modality, there was a strong consensus toward implementing a walking program as it is accessible to almost everyone, easy to implement, and requires very little equipment and limited training. However, it is dependent on weather and neighborhood NutritioN aNd Metabolic iNsights 2014:7 Conclusions We are currently developing a pan-Canadian trial using the information discussed at the meeting as well as the recent evidence that has emerged in the literature since. Our plan is to implement an intervention strategy that will be feasible to integrate within the existing health care system as to complement current health services and not disrupt standard care provision. The main issues to address in the planning of the pan-Canadian trial are as follows: -dealing with the delicate and sensitive weight-related issues during pregnancy (see Canadian Obesity Network 5As for healthy pregnancy weight gain) 49 -compliance to behavior intervention and study in general -dealing with randomization effect on drop-out rate -face-to-face recruitment and integrating study into day-to-day activities in clinics and thus build relationship with the staff members -timing of the intervention to have a significant impact on GWG, before 16 weeks -involving a corporate partner to have incentive to keep controls actively enrolled -PA self-reports are not sufficiently accurate, and accelerometers are expensive to purchase or replenish if misplaced. Pedometers are reasonably priced and may be good motivators -lack of childcare limits participation in exercise at home and for group classes -electronic tools to help in management of trial, data, and intervention aspects are necessary. In-person or provider-based delivery of weight management interventions is not feasible to reach a national population as required from a public health perspective. 
Workshop attendees indicated that intervention participants require personalized, real-time feedback and a desire for more control over program scheduling. Thus, addressing limitations to current practices in managing and intervening with regard to GWG requires novel, innovative, and strategic approaches to optimize the health of mom and baby. Therefore, the overall recommendation from the workshop indicated the need to capitalize on the rapidly emerging usage of eHealth resources and mobile technologies to deliver health behavior interventions in terms of promoting healthy eating and PA and managing GWG. Mobile and other technologies are now ubiquitous in modern society with 99% of the Canadian population having access to wireless networks and 90% of the population owning a mobile phone. 50,51 Furthermore, approximately 70% of women of child bearing age are regular smartphone users. 50,51 While studies involving such technologies can improve health behaviors in non-pregnant populations 52,53 or in post-partum women, 54 few have explored their utility in the prenatal and early post-partum period. Therefore, mobile technology-based web application dedicated specifically for the study, through Facebook or automated SMS (ie text) messages. With respect to the exercise information, it was recognized that centralized and uniform messaging is crucial to ensure success for these pregnant women. Group 4: Strategies to maximize recruitment. With respect to intervention enrollment, recruitment may be accomplished by "flagging" potential participants through medical charts at participating centers. Traditional pamphlets and flyers may be an additional strategy and could be posted/ distributed through prenatal classes, ultrasound clinics, family physician offices (initial confirmation of pregnancy), OB/ GYN clinics, midwives, public health organizations, and pharmacies. Integration of research staff directly into clinics or contributing to a portion of clinic staff remuneration may facilitate recruitment and acknowledge the staffs' efforts in trial participation. Engaging midwives, family doctors, and clinic staff before the study launch is crucial to ensure reach to as many potential participants as possible. Group 5: Thinking forward-knowledge translation (KT) strategies to improve nation-wide education of expecting mothers. KT is a key component for this pan-Canadian study. The future study will be designed to respond to the gap of readily available access to evidence-based information for pregnant women and the inadequate personalized guidance they receive from prenatal health care providers. We plan to engage in a dynamic and integrated form of KT for the key target audiences: pregnant women, health care providers, and policy makers. We plan to adapt the intervention tools through consultation with pregnant women and health care providers over the course of pre-RCT testing. By educating the care providers on the proposed study objectives, goals, purpose, and clinical importance, they will provide information to patients, which is essential for the success of the future trial. To have sustainable impact on prenatal care practice and on long-term health in Canada, it is critical to target policy makers who can facilitate change, and influence implementation and integration of intervention tools into health care planning and delivery strategies. 
While our final dissemination strategy is contingent on the study progress, we plan to disseminate the results to pregnant women through social media, pregnancy forums, and blogs using a project "infographic". To reach health care providers, who expect clear, action-oriented messaging delivered through their professional organization, we intend to leverage our various knowledge user-based partnerships to allow for local, provincial, and national KT to be delivered through various modalities. Importantly, knowledge partners will work with us to champion our quest to incorporate a competency-based OB and GWG theme into their continuing medical education requirements. It is through policy makers and their directives that the usefulness of intervention tools is acknowledged and ultimately accepted as benchmarks of best practice. eHealth interventions are being developed to curb excessive GWG and convey primary health information that is critical to the wellbeing of both mom and baby. These interventions are expected to have a beneficial impact as a cost-effective solution for providing frequent, "real-time" feedback to the patient without the use of additional clinic visits, thus reducing the burden on the currently underfunded and overutilized Canadian health care system. The ultimate goal will be adoption of these eHealth technologies into regular prenatal care to provide women with the opportunity to extend their pre- and pregnancy-related health knowledge and foster greater personal engagement in their prenatal care through access to information and regular feedback. Thus, implementing a novel study of this nature may ultimately lead to a reduction in pregnancy-related maternal-fetal complications, establish a strong healthy foundation at birth, and promote optimal long-term child wellness for future generations.
Vietnam's top diplomat on Thursday wrapped up a visit to North Korea apparently meant to discuss issues related to leader Kim Jong-un's upcoming trip to the Southeast Asian nation. Foreign Minister Pham Binh Minh, who doubles as Vietnam's deputy prime minister, arrived at a Beijing international airport on an Air Koryo flight from Pyongyang. He was apparently in transit back to Vietnam following a three-day trip to the North's capital, where he reportedly had meetings with Foreign Minister Ri Yong-ho and Ri Su-yong, who is in charge of international affairs at the Workers' Party of Korea. His entourage included Mai Phuoc Dung, chief of state protocol at the Ministry of Foreign Affairs, and the ministry's spokeswoman Le Thi Thu Hang. The two sides exchanged in-depth views on regional and international issues of mutual concern and ways to expand bilateral relations, according to Pyongyang's state news agency KCNA.
def to_dict(self):
    """Return this object as a dict: the media type plus any id attributes that are set."""
    result = {'mediatype': self.mediatype}
    # Only include the id properties that are actually set on this instance
    result.update({prop: getattr(self, prop)
                   for prop in ['videoid', 'movieid', 'tvshowid', 'seasonid', 'episodeid']
                   if getattr(self, prop) is not None})
    return result
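For context, here is a minimal usage sketch. The VideoId holder class below is a hypothetical stand-in for whatever object actually defines these attributes (it is not part of the original code), and it assumes to_dict is defined at module level as above.

class VideoId:
    # Hypothetical container illustrating the attributes to_dict() expects.
    def __init__(self, mediatype, **ids):
        self.mediatype = mediatype
        for prop in ['videoid', 'movieid', 'tvshowid', 'seasonid', 'episodeid']:
            setattr(self, prop, ids.get(prop))
    to_dict = to_dict  # reuse the function defined above as a method

print(VideoId('movie', movieid=42).to_dict())
# {'mediatype': 'movie', 'movieid': 42}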
`NOT many people come to St. Ives from the United States, and especially not from Texas. In fact, I expect you're the only person from Texas in Cornwall,'' the man said. He was looking in a store window. I was looking in the same window. He was waiting for his wife, who was shopping. We began to talk, and he insisted that I go in and meet his wife. Inside the shop, he introduced her and his mother, and then called his daughter who was trying on a dress to come out of the dressing room and meet me. ``The lady is from Texas,'' he emphasized, when he introduced me. I had gone to St. Ives simply because I had to leave my college at Oxford for a break of two weeks. I had been studying for two months and would soon rejoin my classmates, who were traveling in Europe, back at Oxford where we would finish our instruction. ``This lady is here all the way from Texas,'' he said again to his wife. ``Where are you staying?'' she asked. ``The Pedn-Olva hotel,'' I said. I waited for an explanation of this interest in me and my lodging, and I soon discovered that the family was staying there, too. Would I join them at their table for my meals, or did I wish to eat alone? The dining room was formal, and the hotel by the sea at Cornwall was filled with English families on vacation. The one thing about my holiday that I had not enjoyed was eating alone. But I hesitated in answering. Why would this vacationing family want me, a stranger in England, to join them at mealtime? The English were supposed to be so cold and unfriendly. What if the situation were reversed and my husband back home in Texas brought a lonely woman from England to join us for meals on our family vacation? What would I say? How would I react? But the wife was standing in front of me, waiting for an answer, wanting me to come. Would I? I would, yes, I would. That evening in the dining room, the maitre d' seated me with the English family. As we looked out on the sea, at the gentle waves slapping against the white sands, at the black rocks and colorful umbrellas, they asked questions about Texas and about the lighthouse off in the distance, the one Virginia Woolf had written about. I also learned that the husband was a geologist and the wife a teacher. They were both working to send their daughters to college in Leeds. ``But why,'' I asked again, ``do you want me to share your family vacation? Me, a stranger. Are you sure I'm not an imposition?'' They were polite, yet I knew that they were evading my question. Each evening, they watched television or went for a walk along the sea wall. They often invited me to join them, but since I had work to do, we parted, and I returned to my room. One evening I changed my mind and hurried to overtake them. I wanted to join them on their walk as a celebration of our last evening together. There was a carnival-like atmosphere, and the streets were crowded with evening strollers. When I did catch up with them, they were talking about me. ``Do you think she thinks we are friendly?'' asked the husband. ``Not as friendly as the Texans, I'm afraid,'' the wife answered. When they saw me and realized that I had heard their conversation, they were embarrassed. ``You're twice as friendly as any Texan I've ever met,'' I said. ``That couldn't be,'' said the Englishman. ``But you are,'' I insisted. ``You wouldn't be putting us on, would you now?'' asked the wife. ``But we thought... ,'' she said. ``Well, it's like this,'' he hesitated. ``We thought you didn't like us. You always rushed back to your room,'' she said. 
``But I do like you.'' And then I explained about the work I had wanted to do before I went back to Oxford. Then they told me a story about a Texan they had known when they were children during the war. The lonely American G.I. missed his family back home in the United States, and he made friends with children near his own children's ages. ``He even let me drive his Jeep while I sat on his knee,'' said the man. The three of us walked down the street, arms locked. And I accepted their thanks to an American soldier from Texas.
/* * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.core.io.impl; import java.io.ByteArrayInputStream; import java.io.Externalizable; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.ObjectInput; import java.io.ObjectOutput; import java.io.Reader; import java.net.URL; import java.util.ArrayList; import java.util.Collection; import java.util.List; import org.drools.core.io.internal.InternalResource; import org.drools.core.util.IoUtils; import org.drools.core.util.StringUtils; import org.drools.wiring.api.classloader.ProjectClassLoader; import org.kie.api.io.Resource; import org.kie.api.io.ResourceType; /** * Borrowed gratuitously from Spring under ASL2.0. */ public class ClassPathResource extends BaseResource implements InternalResource, Externalizable { private String path; private String encoding; private ClassLoader classLoader; private Class< ? > clazz; public ClassPathResource() { } public ClassPathResource(String path) { this( path, null, null, null ); } public ClassPathResource(String path, Class<?> clazz) { this( path, null, clazz, null ); } public ClassPathResource(String path, ClassLoader classLoader) { this( path, null, null, classLoader ); } public ClassPathResource(String path, String encoding) { this( path, encoding, null, null ); } public ClassPathResource(String path, String encoding, Class<?> clazz) { this( path, encoding, clazz, null ); } public ClassPathResource(String path, String encoding, ClassLoader classLoader) { this( path, encoding, null, classLoader ); } public ClassPathResource(String path, String encoding, Class<?> clazz, ClassLoader classLoader) { if ( path == null ) { throw new IllegalArgumentException( "path cannot be null" ); } this.path = path; this.encoding = encoding; this.clazz = clazz; this.classLoader = ProjectClassLoader.getClassLoader(classLoader == null ? null : classLoader, clazz, false); setSourcePath( path ); setResourceType( ResourceType.determineResourceType( path ) ); } public void writeExternal(ObjectOutput out) throws IOException { super.writeExternal( out ); out.writeObject( this.path ); out.writeObject( this.encoding ); } public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { super.readExternal( in ); this.path = (String) in.readObject(); this.encoding = (String) in.readObject(); } /** * This implementation opens an InputStream for the given class path resource. * @see java.lang.ClassLoader#getResourceAsStream(String) * @see java.lang.Class#getResourceAsStream(String) */ public InputStream getInputStream() throws IOException { return bytes != null ? new ByteArrayInputStream( this.bytes ) : this.getURL().openStream(); } /** * This implementation returns a URL for the underlying class path resource.
* @see java.lang.ClassLoader#getResource(String) * @see java.lang.Class#getResource(String) */ public URL getURL() throws IOException { URL url = null; if ( this.clazz != null ) { url = this.clazz.getResource( this.path ); } if ( url == null ) { url = this.classLoader.getResource( this.path ); } if ( url == null ) { throw new FileNotFoundException( "'" + this.path + "' cannot be opened because it does not exist" ); } return url; } public boolean hasURL() { return true; } public String getEncoding() { return encoding; } public Reader getReader() throws IOException { if ( this.encoding != null ) { return new InputStreamReader( getInputStream(), encoding ); } else { return new InputStreamReader( getInputStream(), IoUtils.UTF8_CHARSET ); } } public boolean isDirectory() { try { URL url = getURL(); if ( !"file".equals( url.getProtocol() ) ) { return false; } File file = new File( StringUtils.toURI( url.toString() ).getSchemeSpecificPart() ); return file.isDirectory(); } catch ( Exception e ) { return false; } } public Collection<Resource> listResources() { try { URL url = getURL(); if ( "file".equals( url.getProtocol() ) ) { File dir = new File( StringUtils.toURI( url.toString() ).getSchemeSpecificPart() ); List<Resource> resources = new ArrayList<Resource>(); for ( File file : dir.listFiles() ) { resources.add( new FileSystemResource( file ) ); } return resources; } } catch ( Exception e ) { // swallow as we'll throw an exception anyway } throw new RuntimeException( "This Resource cannot be listed, or is not a directory" ); } public ClassLoader getClassLoader() { return this.classLoader; } public void setClassLoader(ClassLoader classLoader) { this.classLoader = classLoader; } public Class<?> getClazz() { return this.clazz; } public String getPath() { return path; } public boolean equals(Object object) { if (!(object instanceof ClassPathResource)) { return false; } ClassPathResource other = (ClassPathResource) object; return this.path.equals(other.path) && this.clazz == other.clazz && this.classLoader == other.classLoader; } public int hashCode() { return this.path.hashCode(); } public String toString() { return "ClassPathResource[path=" + this.path + "]"; } }
Special to the Daily/Jeff Andringa: Team Breck's Bruce Perry, Jr., perfects his back layout on the water ramp in Steamboat Springs. Perry and a few teammates trained at the facility to prepare for the upcoming mogul skiing season. For many athletes, the ski season starts long before the first snow falls. And with the slopes dry and the lifts halted, a group of local skiers were in Steamboat Springs this summer, busy perfecting their jumps for the upcoming mogul season. On both trampolines and water ramps, Dylan Walczyk, Bruce Perry, Jr., and Alisha Scheifley all were looking to take their jumping to a new level. All three are returning members of the Team Breckenridge Sports Club’s mogul program, coached by John Dowling, and plenty more of their teammates were also at the sessions. More than anything, Dowling said the off-snow sessions are about safety and knowledge. Skiers have to get certified with each aerial maneuver in order to compete the trick in sanctioned events. The certification process starts with being approved on a water ramp, before having to do it again on snow at Winter Park in December. The entire process is designed to prevent skiers from attempting tricks that haven’t been adequately practiced, often requiring nearly 100 ramp launches to get the maneuver down. Team Breck is a ski and snowboard program for all ages and abilities, but mostly centers on the development of junior athletes, many of whom compete at the highest level of youth skiing. Dowling said a focus of the team this year is to continue to grow its grassroots-level program of athletes between the ages of 8 and 12. At the sessions this summer, Dowling said the team had kids as young as 8 hitting the ramps. It was some of the older kids, though, that used the training for what they hope will be a springboard to a successful competitive season. Walczyk, Perry and Scheifley are three of the team’s top returners this season, as all of them were medalists at the Junior Olympics last year in Waterville, N.H. Walczyk will be hitting a new cork-720, while both Perry and Scheifley are looking to get their back layouts certified. While it may seem like Team Breck was getting in some extra offseason work, Dowling said the training was essential to his athletes’ progressions. For more information on the team or to join, contact Dowling at [email protected] or (970) 390-6471. To celebrate the Team Breck mogul squad’s one-year anniversary as a program, the team is holding a fundraiser at the Kickapoo Tavern in Keystone’s River Run Village on Oct. 1. Starting at 6 p.m., there will be a buffet dinner, beer from local breweries, live music from High 5 and an auction. A $10 donation is asked for attendance. The auction includes skis, sporting goods, golf rounds, clothing, gift certificates to local restaurants, a ski pass and even a trip to Maui.
/* C Program to find sum of elements in a given array */ #include <stdio.h> // function to return sum of elements // in an array of size n int sum(int arr[], int n) { int sum = 0; // initialize sum // Iterate through all elements // and add them to sum for (int i = 0; i < n; i++) sum += arr[i]; return sum; } int main() { int arr[] = {12, 3, 4, 15}; int n = sizeof(arr) / sizeof(arr[0]); printf("Sum of given array is %d\n", sum(arr, n)); return 0; }
New York Knicks rookie Allonzo Trier has played well overall through his first 25 games. There are certain areas David Fizdale wants him to work on. BOSTON — Allonzo Trier started the season on a two-way contract, leaving open the possibility that the undrafted rookie out of Arizona could spend some time in the G-League. Trier didn't expect that to be the case. "I’d be lying if I said I didn’t expect to be here the whole time," Trier said after the Knicks' shootaround Thursday at TD Garden. "It is what it is. That was my mindset. That’s kind of how I believed in myself and I knew once I’d be here I’d be fine." For the most part, Trier has been fine. The 22-year-old has gone through some ups and downs through his first 25 NBA games, but overall he has impressed with his scoring ability. There are still steps coach David Fizdale wants Trier to take as he develops. Going into the Knicks' game against the Boston Celtics on Thursday, Trier was averaging 11.3 points on 47.3 percent shooting and 40.9 percent from the perimeter. "A learning process for me," Trier said of his season so far. "The best way to continue to get better and learn is through experience and I think that’s what I’ve been going through. I’m going through good days and not so good days. But allowing myself to improve and learn new things every single game, no matter if I play great, or maybe it’s a not so great night. But I’m actually getting better every single time I take the floor." The more he plays and the more teams know about him, the tougher it's been for him to score. Teams have started to key in on him more, at times throwing double teams or their best defenders at him. That's been an adjustment Trier is trying to respond to. "That’s one thing I’m going through and that’s what happens when you do some good things and earn the respect of the other teams," Trier said. "It’s kind of rare to have that as a rookie." In his previous three games going into Thursday, Trier was averaging just 6.3 points on 26.1 percent shooting. He was 0-of-5 from the perimeter. Trier, whose nickname is "Iso Zo," does much of his work in isolation. That runs counter to Fizdale's offensive philosophy of ball movement and quick decisions, but he's allowed Trier to play his style within the offense. It hasn't always worked out. But that's related to what Fizdale wants Trier to work on. "It’s becoming an overall playmaker," Fizdale said. "Bigger than just a scorer where he really makes other people better around him." Trier has shown some improvement in that area. Through his first 21 games, he averaged just 1.6 assists. In his previous four before Thursday, he was averaging 3.5. That's all part of what Fizdale believes Trier's next steps should be. "Being a guy that can be a lock-down defender, one on one," Fizdale said. "Because he really has a great ability to move his feet. And then obviously I’m just beating rebounding over his head because I like when he gets the ball in his hands early and attacks. Those are the areas we’ve really been harping on with him. I’d still like to see him let go of a few of those threes and let them fly instead of shot faking." Another big issue now is his contract. On his two-way deal, Trier is allowed to spend a maximum of 45 days with the Knicks. That clock began when G-League training camps opened. The time frame is about to expire. The Knicks will need to sign him to an NBA contract and clear a roster spot to keep him. Trier has played well enough to get the opportunity to stay.
But there are still improvements he's trying to make. "I’m looking forward to continuing to keep learning, continuing to get better and add to my game," Trier said.
// mapSteps returns the size of the new step_positions file: the total number
// of steps belonging to edges that are kept (edgeIndices[e] != -1).
func mapSteps(g *GraphFile, edgeIndices []int) int {
	size := 0
	for e := 0; e < g.EdgeCount(); e++ {
		if edgeIndices[e] == -1 {
			continue // edge was removed; skip its steps
		}
		size += int(g.Steps[e+1] - g.Steps[e])
	}
	return size
}
SS Palo Alto History Palo Alto was mothballed in Oakland until 1929, when she was bought by the Seacliff Amusement Corporation and towed to Seacliff State Beach in Aptos, California. A pier was built leading to the ship in 1930, and she was sunk in a few feet of water so that her keel rested on the bottom. There she was refitted as an amusement ship, with amenities including a dance floor, a swimming pool and a café. The company went bankrupt two years later during the Great Depression, and the ship cracked at the midsection during a winter storm. The State of California purchased the ship, and she was stripped of her fittings and left as a fishing pier. She was a popular site for recreational fishing, but eventually she deteriorated to the point where she was unsafe for this purpose, and she was closed to the public in 1950. Following an attempt at restoration in the 1980s, she reopened for fishing for a few years, then closed again. The fishing pier opened to foot traffic once again in the summer of 2016, but later closed for repairs. Nicknamed the "Cement Ship", Palo Alto today remains at Seacliff Beach and serves as an artificial reef for marine life. Pelicans and other seabirds perch on the wreck, sea perch and other fish feed on algae that grow in the shelter of the wreck, and sea lions and other marine mammals visit the wreck to feed on the fish. In the spring of 2005, oil found on wildlife nearly two years earlier, killing dozens of seabirds, was traced back to the ship, whose fuel tanks had cracked and were leaking fuel oil. In September 2006, a clean-up project was started that cost an estimated $1.7 million, approximately the cost of the original construction of the ship in 1919. No oil is known to have spilled directly into the ocean, but wildlife experts believe that birds came into contact with oil by entering the ship's cracked hull while diving underwater for fish; during the clean-up, workers pumped 500 U.S. gallons (416 Imperial gallons; 1,893 liters) of oil from the ship and discovered the carcasses of 200 more birds and two harbor seals inside the wreck. The ship continued to deteriorate after the clean-up. While she had over the decades been broken into four roughly segmented pieces, winter storms in February 2016 pushed the wreck onto her starboard side and broke her rear half open. On January 21, 2017, another winter storm tore the stern off the ship.
module cz.cuni.mff.java.mods.printers { exports cz.cuni.mff.java.mods.printers.external; }
Event Generator Benchmarking for Proton Radiography Applications We have benchmarked the QGSM code and event generators of the MARS and LAHET3 codes as potential candidates for high-energy programs to be used in simulations for the Proton Radiography (PRad) Project. We have compiled from the literature experimental data on spectra of particles emitted from proton-induced reactions at incident energies from 30 GeV to 70 GeV on different nuclei and have performed calculations for all reactions for which we found data with these three codes without any modifications and using only default parameters and standard inputs. Our results (514 plots) show that all three codes describe reasonably well most of the studied reactions, though all of them should be further improved before becoming reliable tools for PRad. We present here our conclusions concerning the relative roles of different reaction mechanisms in the production of specific secondary particles. We comment on the strengths and weaknesses of QGSM, MARS, and LAHET3 and suggest further improvements to these codes and to other models. Introduction The process of determining the feasibility of Proton Radiography (PRad) as the radiographic probe for the Advanced Hydrotest Facility as well as its design and operation require information about spectra of secondary particles produced by high energy protons interacting in the target and structural materials. Reliable models and codes are needed to provide such data. We studied the literature and chose three potential candidates for high-energy codes that may be used in simulations for PRad, namely the Quark-Gluon String Model (QGSM) as developed by Amelin, Gudima, and Toneev, the MARS code by Mokhov et al., and a version of the Los Alamos National Laboratory (LANL) transport code LAHET, known as LAHET3. The energy of the proton beam at PRad is supposed to be about 50 GeV. Unfortunately, there are very few measurements of particle spectra for proton-induced reactions exactly at 50 GeV or very close energies. In fact, we found only one published work at 50 GeV, namely spectra of π− and π+ measured at 159 degrees from p(50 GeV) + W, published in Russian together with pion spectra for other energies and targets, in a Joint Institute for Nuclear Research (Dubna) Communication by Belyaev et al. With only a few data available at 50 GeV, we benchmarked QGSM, MARS, and LAHET3 against measured spectra of particles emitted from interactions of protons with energies 50 ± 20 GeV, i.e., from 30 to 70 GeV, with all targets for which we found experimental data. Independently of how many spectra were measured in an experiment, we performed calculations with the standard versions of QGSM, MARS, and LAHET3 without any modifications or adjustments, using only default parameters in the input of the codes, and calculated double differential cross sections at 0, 4.75, 9, 13, 20, 45, 60, 90, and 159 degrees, angle-integrated energy spectra, and mean multiplicities for emission of n, p, d, t, 3He, 4He, π+, π−, K+, K−, and p̄ for all cases listed below in Table 1. The next Section presents a brief description of the benchmarked codes, followed by results, discussion, and conclusions in the last two Sections. Benchmarked Codes QGSM: The core of the QGSM is built on a time-dependent version of the intranuclear cascade model developed at Dubna to describe both particle- and nuclei-induced reactions, often referred to in the literature simply as the Dubna intranuclear Cascade Model (DCM) (see and references therein).
The DCM models interactions of fast cascade particles ("participants") with nucleon spectators of both the target and projectile nuclei and includes interactions of two participants (cascade particles) as well. It uses experimental cross sections (or those calculated by the Quark-Gluon String Model for energies above 4.5 GeV/nucleon) for these elementary interactions to simulate angular and energy distributions of cascade particles, also considering the Pauli exclusion principle. When the cascade stage of a reaction is completed, QGSM uses the coalescence model described in to "create" high-energy d, t, 3He, and 4He by final state interactions among emitted cascade nucleons, already outside of the colliding nuclei. After calculating the coalescence stage of a reaction, the QGSM moves to the description of the last slow stages of the interaction, namely to preequilibrium decay and evaporation, with a possible competition of fission, using the standard version of the Cascade Exciton Model (CEM). But if the residual nuclei have atomic numbers with A ≤ 13, QGSM uses the Fermi break-up model to calculate their further disintegration instead of using the preequilibrium and evaporation models. MARS: The MARS Monte-Carlo code system, being developed over 29 years, allows fast and reliable inclusive and exclusive simulation of three-dimensional hadronic and electromagnetic cascades in shielding, accelerator and detector components in the energy range from a fraction of an electron-volt up to about 100 TeV. It is under continuous development. The reliable performance of the code has been demonstrated in numerous applications at Fermilab, CERN, KEK and other centers as well as in special benchmarking studies. Description of elastic and inelastic hN, hA, A and A cross sections is based on the newest compilations and parameterizations. At high energies (5 GeV < E < 100 TeV), σtot, σin, σprod and σel are calculated in the framework of the Glauber multiple scattering theory with the σhN as an input. The nucleon density distribution in nuclei is represented as the symmetrized Fermi function with the parameters of for medium and heavy nuclei (Z > 10) and the ones of for Z < 10. Modern evaluated nuclear data as well as fitting formulae are used to simulate hadron-nucleus elastic scattering. For protons, nuclear and Coulomb elastic scattering, and their interference, are taken into account. At E > 5 GeV, a simple analytical description used in the code for both coherent and incoherent components of dσ/dt is quite consistent with experiment. A version of the Cascade-Exciton Model of nuclear reactions as realized in the code CEM95 and containing also several recent refinements is now implemented in the 1998 version of MARS as default for 1-10 MeV < E < 3-5 GeV. A set of phenomenological models, as described in Ref., is used for inclusive production of secondary particles in hA, dA, A and A interactions at projectile energies from 5 GeV to 100 TeV. The 2001 version of the MARS code was employed in the present benchmark. LAHET3: LAHET is a Monte-Carlo code for the transport and interaction of nucleons, pions, muons, light ions, and antinucleons in complex geometry; it may also be used without particle transport to generate particle production cross sections.
LAHET allows one to choose one of several options for the Intra-Nuclear Cascade (INC) and fission models to be employed in calculations; it is widely used and well known in the applied nuclear physics community; therefore, we do not describe it here (a comprehensive description of LAHET may be found in and references therein). The version of LAHET realized in the code LAHET3 uses a version of the code FLUKA, known in the literature as FLUKA96, to describe the first, INC stage of reactions, and its own Multistage Preequilibrium Model (MPM) to describe the following intermediate preequilibrium stage, followed by evaporation/fission slow processes (or by the Fermi break-up model after the cascade instead of preequilibrium and evaporation/fission, if the residual nuclei have atomic numbers with A ≤ 13, and for 14 ≤ A ≤ 20 with excitation energy below 44 MeV), as described in. We mention again that only the high-energy event generator from FLUKA96 is employed here, as implemented in LAHET3; the default preequilibrium, evaporation and Fermi break-up models of LAHET3 are used for low energy nucleon and complex particle emission. More details and further references on LAHET3 with FLUKA96 can be found in. Table 1 lists the cases we calculated with QGSM, MARS, and LAHET3, and provides references to experimental works where at least one spectrum of a secondary particle (from the ones listed in the Introduction) was measured. A detailed report of the study containing 514 plots with spectra and multiplicities of secondary particles from reactions listed in Tab. 1 is now in preparation. Here, we present only our main conclusions and several examples. Our analyses have shown that all three codes tested here describe reasonably well most of the secondary particle spectra. As a rule, the higher the incident proton energy, the better the calculated spectra agree with experimental data. Several reaction mechanisms participate in the production of secondary nucleons and complex particles. These mechanisms are: 1) fast INC processes; 2) preequilibrium emission from residual nuclei after the cascade stage of reactions; 3) evaporation of particles from compound nuclei after the preequilibrium stage, or from fission fragments, if the compound nucleus was heavy enough to undergo fission; 4) Fermi break-up of light excited nuclei formed after the cascade stage of reactions; 5) coalescence of complex particles by final state interactions among emitted cascade nucleons; 6) fast complex particle emission via knock-out and pick-up processes; 7) multifragmentation of highly-excited residual nuclei after the INC. Their relative roles change significantly with the changing atomic mass number of the targets, and are different for different energies and angles of emission of secondary particles. Different codes describe these spectra better, worse, or do not describe them at all, depending on how these reaction mechanisms are (or are not) implemented in a specific code. Results and Discussion As an example, Fig. 1 shows spectra of p, d, t, and π− emitted at 9.17 deg from the reaction p(70 GeV) + 208Pb. Results for other reactions at 70 GeV are similar. One can see that all three codes describe the proton spectra well. The agreement for the pion spectra is not so good but is still reasonable, with some underestimation of the high-energy tails of spectra by QGSM and some overestimation by MARS.
Note that as the angle of pion emission changes, the situation reverses: we observe that most of the high-energy tails of pion spectra at 159 deg, and to a lesser extent at 90 deg, are over-predicted by LAHET3 and underestimated by MARS. The situation with the deuteron and tritium spectra is quite different. We see that deuterons with momenta of up to about 15 GeV/c and tritons with momenta up to 19 GeV/c are emitted and measured in this particular reaction. Utilizing the coalescence mechanism for complex particle emission, QGSM is able to describe high-energy deuteron production, and agrees well with the measurement. LAHET3 does not consider the coalescence of complex particles and therefore describes emission of only evaporative and preequilibrium deuterons with momenta not higher than 1 GeV/c. MARS does not consider emission of complex particles at such high incident proton energies, therefore no d and t spectra by MARS are shown in Fig. 1. For tritium, the situation is worse since LAHET3, as in the case of deuterons, describes only preequilibrium emission and evaporation of tritons with momenta not higher than 1 GeV/c, and QGSM, even taking into account coalescence of tritium, describes emission of t from this reaction up to only 2.5 GeV/c, while the experimental spectrum of t extends to 19 GeV/c. This deficiency can be understood by considering the coalescence mechanism: it is much more probable to emit two cascade nucleons with very similar momenta that can coalesce into a deuteron than to get three INC nucleons with very similar momenta that can coalesce into a triton. The experimental values of high-energy triton spectra are several orders of magnitude below the corresponding values of the deuteron spectra, and the statistics of our present QGSM simulation could be simply too small to get such high-energy tritium via coalescence. There is also a possibility that knock-out processes of preformed clusters (or fluctuations of nuclear density, leading to "fluctons") by bombarding protons are seen in these experimental d and t spectra, but are not taken into account by any of the tested codes, producing the observed difference in the t spectrum and, less pronounced, in the d spectrum. A third possible mechanism of complex particle emission with greater than 1 GeV/c momenta would be multifragmentation of highly-excited residual nuclei after the INC. This mechanism is not taken into account by any of the tested codes and we cannot estimate its contribution. Fig. 2 shows examples of π+ spectra at 159 deg from 51 GeV proton collisions with 9Be and 48Ti. As already mentioned above for π−, we see that LAHET3 overestimates the high-energy tails of pion spectra and MARS underestimates them a little. Similar results were obtained for other targets and incident proton energies. Fig. 3 shows an example of how calculated proton spectra depend on the angle of emission, for the reaction p(30 GeV) + 9Be. We see that at 30 GeV, the agreement of calculated proton spectra with the data is not as good as we have in Fig. 1 for 70 GeV. The shapes and absolute values of proton spectra predicted by different codes depend significantly on the angle of detection, as does the agreement with the data. Similar results were obtained for other secondary particles and for other targets and incident energies. Figure 1. Invariant cross sections E d³σ/d³p for forward production of p, d, t, and π− at 160 mrad (9.17 deg) as functions of particle momentum p from 70 GeV protons on 208Pb.
Experimental data for p and π− are from Tab. 1 of Ref. and, for d and t, from Ref. Calculations by QGSM, LAHET3, and MARS are shown as indicated in the legends. Fig. 4 shows an example of the mean multiplicities of secondary n, p, d, and π+ predicted by the tested codes for interactions of protons of about 50 GeV with different nuclei, as functions of the mass number of the targets. We see that the predicted particle multiplicities differ significantly from each other, and the differences increase with increasing mass number of the target. The observed differences point to a quite significant difference in the treatment by the codes of the cascade stage of reactions (pions are emitted only at the cascade stage of reactions) and of the subsequent preequilibrium, evaporation, and Fermi break-up stages as well (we recall that at these incident energies MARS uses its own approximations for the total particle spectra without considering separately contributions from different mechanisms of nuclear reactions). These differences indicate that further experimental data are necessary at these incident proton energies and further development and improvement of the codes is required. Further Work Our study shows that all three codes describe reasonably well many of the secondary particle spectra analyzed here, though all of them should be further improved before becoming reliable tools for PRad. For instance, we find that QGSM has some problems in a correct description of several pion spectra and does not describe sufficiently the high-energy tails of measured t and 3He spectra. Nevertheless, QGSM is the only code tested here that accounts for coalescence of complex particles.
Recently, we addressed these problems by further improving CEM2k and LAQGSM and by merging them with the Generalized Evaporation Model code GEM2 developed by Furihata. The improved LAQGSM+GEM2 code describes both spectra of secondary particles and yields of produced nuclides much better than QGSM does; exemplary results by LAQGSM and further references may be found in. The MARS code system is being continuously developed and improved. For instance, a new version of the code, MARS14 was completed after we started the present work. It contains a large number of improvements and refinements and provides better results in comparison with the version used here. Recently, the authors of MARS started to develop new and better approximations for the double differential cross sections of inelastic hN and hA interactions. The new systematics allow to solve the mentioned above problems with the pion, kaon, and proton spectra at forward and large angles and describe the experimental data much better. The FLUKA code has also been updated very significantly (see e.g., and references therein) since the version FLUKA96 was incorporated into LAHET3 as used here; no updated version is yet incorporated into LAHET. Our study points to the importance of taking into account coalescence in high-energy complex-particles production. We find it appropriate and easy to implement these processes into MARS and LAHET, as well as into other codes that do not now consider coalescence. We think that at such high incident energies, multifragmentation of highly-excited heavy nuclei may also be of significance and should be taken into account in these event generators and in other codes.
How to achieve better outcome in treatment of asthma in general practice. The symptoms of many asthmatic patients are poorly controlled, and there are several reasons why this may be so. Doctors fail to find out about symptoms that asthmatic patients are experiencing. Doctors wrongly assume that regular use of bronchodilators in small doses is satisfactory treatment for asthma and that taking high doses of bronchodilator in an asthma attack may be dangerous. Doctors think that inhaled steroids may be dangerous and are reluctant to use them in effective doses. Doctors do not check that patients can use their inhalers properly and do not make enough use of large volume spacers, the best available method for giving inhaled asthma treatment. Doctors undermine patients' confidence in advice on treatment by failing to ensure that consistent advice is given and often make the management of asthma more troublesome for the patient than the symptoms of asthma.
Einselection from incompatible decoherence channels Decoherence of quantum systems from entanglement with an unmonitored environment is to date the most compelling explanation of the emergence of a classical picture from a quantum world. While it is well understood for a single decoherence channel, the role in the einselection process of a complex system-environment interaction remains to be clarified. In this paper, we analyze an open quantum dynamics inspired by CQED experiments with two incompatible decoherence channels. We study and solve exactly the problem using quantum trajectories and phase-space techniques. The einselection optimization problem is studied numerically and we show that Fock states remain the most robust states to decoherence up to a critical coupling. I. INTRODUCTION The strangeness of the quantum world comes from the principle of superposition and entanglement. Explaining the absence of those characteristic features in the classical world is the key point of the quantum-to-classical transition problem. The fairly recent theoretical and experimental progress has largely reshaped our understanding of the emergence of a classical world from quantum theory alone. Indeed, decoherence theory has greatly clarified how quantum coherence is apparently lost for an observer through the entanglement of the system with a large unmonitored environment. At the same time, this interaction with the environment allows one to understand the emergence of specific classical pointer states, a process called einselection. However, in the case of complex environments, the physics of decoherence can be much more involved. It is then necessary to consider the structure of the environment. A recent approach exploring this path, called quantum Darwinism, considers an environment composed of elementary fragments accessing partial information about the system. While a lot of insights were originally model-based, it has been proved that some basic assumptions of the quantum Darwinism approach, like the existence of a common objective observable, come directly from the theoretical framework of quantum theory. Another possible source of complexity comes from the possibility that the environment can measure different observables of the system, potentially incompatible, at the same time, leading to many incompatible decoherence channels. Interesting physical effects come from the subtle interplay between those incompatible channels. For instance, the physics of a quantum magnetic impurity in a magnetic medium can be modeled as an open quantum system problem in which the environment probes all three Pauli matrices, instead of only one as in the standard spin-boson model. Depending on the relative values of the coupling constants, different classical regimes emerge at low energy with one channel dominating the others. What's more, for some fine-tuned cases, the more interesting phenomenon of decoherence frustration occurs, where all channels contribute to suppress the loss of coherence of the system at all energy scales. This kind of subtlety in the einselection process has also been studied in circuit QED models where, once again, the emergent classical picture is strongly dependent on the structure of the environment and the relative strength of its decoherence channels. Thus, in the case of several incompatible decoherence channels, the problem of the emergence of a privileged basis is far from trivial.
In this paper, we analyze the einselection process of a system in contact with two competing decoherence channels inspired by cavity QED experiments. The system is thought to be a mode of the electromagnetic field confined in a high quality cavity. The two sources of decoherence come from the imperfections of this cavity and the atoms used to probe the field that are sent through the cavity. We base our analysis on the Lindblad equation to describe the effective open quantum dynamics. The problem is solved exactly using the quantum trajectory, the characteristic function and the quantum channel approaches, allowing us to properly characterize the different regimes and timescales of the problem. Still, the einselection problem, which is about finding the most robust (approximate) states to the interaction with the environment, has to be solved numerically, by looking at the short-time evolution of the open dynamics. Depending on the relative values of the coupling constants, we find the intuitive result that the einselected states interpolate between Fock and coherent states. However, we uncover the remarkable fact that Fock states remain exactly the optimal einselected states up to a critical value of the couplings that can be obtained analytically. Finally, in the long-term dynamics, the einselection process appears to be much more complicated. The paper is structured as follows. In Section II, the model we wish to analyze is presented and derived from cavity quantum electrodynamics experiments. The core of the results is presented in Section III, where the model is solved exactly. Section IV presents numerical evaluations of the Wigner function of the system, which are used to properly analyze the different einselection regimes. We conclude in Section V by discussing how our analysis could be generalized and understood from a more abstract perspective through the algebraic structure of the jump operators. II. MOTIVATIONS FROM CQED By managing to entangle a single mode of the electromagnetic field and an atom, cavity quantum electrodynamics (CQED) is a nice experimental setup to explore the foundations of quantum theory, quantum information and the physics of the quantum-to-classical transition. One implementation of CQED uses an electromagnetic mode trapped between high quality factor mirrors as a system which is probed by a train of atoms acting as two-level systems (qubits) conveniently prepared. The setup can work in different regimes where the qubit is in resonance or not with the field. The off-resonance functioning mode, also called the dispersive regime, is particularly interesting since the atoms can be thought of as small transparent dielectric media with respect to the field, with an index of refraction depending on their state. The atom is able to register some phase information about the field, making it a small measurement device. In fact, a train of atoms can be thought of as a non-destructive measurement device probing the number of photons in the field. Focusing our attention on the field itself, the atoms have to be considered as part of the environment of the field (even if it is well controlled by the experimentalist), giving rise to a decoherence channel having the Fock states as pointer states. Similar experiments can also now be performed on circuit QED platforms. However, a second source of decoherence exists, not controlled by the experimentalist this time, which has its origin in the imperfections of the cavity. Indeed, while the quality factor is high enough to see subtle quantum phenomena, photons can still get lost over time in the remaining electromagnetic environment. This second decoherence channel leads to a decoherence of the field mode on the coherent state basis of the electromagnetic field, and introduces dissipation. We thus have two natural decoherence channels in this experimental setup, one which selects photon number states and the other coherent states. However, those two bases are incompatible in the sense that they are associated to complementary operators, the phase and number operators. This strong incompatibility between those classical states motivates the question of which classical pictures, if any, emerge from such a constrained dynamics. Before analyzing this problem in detail, let's first put the previous discussion on firmer ground by deriving an open quantum system dynamics of the Lindblad form. We use a Born-Markov approximation of an effective Hamiltonian describing a CQED experiment represented in Fig. 1. (FIG. 1: Dynamics of the cavity. The cavity is coupled to its own electromagnetic environment and to an atom, itself coupled to its own electromagnetic environment. We suppose that the two electromagnetic baths are not coupled. Eventually, the coupling of the cavity to its bath leads to photon losses. The coupling with the atom, in the dispersive regime, leads to decoherence on the photon basis.) As a starting point, consider a Hamiltonian with the following structure. The system of interest is the mode confined in the cavity. Its free Hamiltonian H_S is just the free harmonic oscillator at frequency ω_c. The cavity mode is coupled to two other systems: 1. The atom at frequency ω_eg. It is coupled to the cavity such that we are in the dispersive regime, giving the first term of H_SE. The coupling constant χ(t) is given by χ(t) = g²(t)/4Δ, where g(t) is the coupling constant in the Jaynes-Cummings model and Δ = ω_eg − ω_c is the detuning between the qubit and the cavity. This atom is subjected to an electromagnetic environment described by a collection of harmonic modes coupled to it through a spin-boson model. Both the atom and its electromagnetic environment form the first decoherence channel. 2. Other harmonic modes (c_k) describing the electromagnetic environment of the cavity. In the secular approximation, this environment leads to photon losses in the cavity. This is the second decoherence channel.
Indeed, while the quality factor is high enough to see subtle quantum phenomenon, photons can still get lost over time in the remaining electromagnetic environment. This second decoherence channel leads to a decoherence of the field mode on the coherent state basis of the electromagnetic field, and introduces dissipation. We thus have two natural decoherence channels in this experimental setup, one which selects photon number states and the other coherent states. However, those two basis are incompatible in the sense that they are associated to complementary operators, the phase and number operators. This strong incompatibility between those classical states motivates the question on which classical pictures, if any, emerge from such a constrained dynamics. Before analyzing this problem in details, let's first put the previous discussion on firmer ground by deriving an open quantum system dynamics of the Lindblad form. We use a Born-Markov approximation of an effective FIG. 1: Dynamics of the cavity. The cavity is coupled to its own electromagnetic environment and to an atom, itself coupled to its own electromagnetic environment. We suppose that the two electromagnetic baths are not coupled. Eventually, the coupling of the cavity to its bath leads to photon losses. The coupling with the atom, in the dispersive regime, leads to decoherence on the photon basis. Hamiltonian describing a CQED experiment represented on Fig. 1. As a starting point, consider the following Hamiltonian: The system of interest is the mode confined in the cavity. Its free Hamiltonian H S is just the free harmonic oscillator at frequency c. The cavity mode is coupled to two other systems: 1. The atom at frequency eg. It is coupled to the cavity such that we are in the dispersive regime, giving the first term of H SE. The coupling constant (t) is given by g 2 (t)/4∆ where g(t) is the coupling constant in the Jaynes-Cummings model and ∆ = eg − is the dephasing between the qubit and the cavity. This atom is subjected to an electromagnetic environment described by the harmonic modes ( k ) coupled to it by a spinboson model. Both the atom and its electromagnetic environment form the first decoherence channel. 2. Other harmonic modes (c) k describing the electromagnetic environment of the cavity. In the secular approximation, this environment leads to photon losses in the cavity. This is the second decoherence channel. The first decoherence channel is given by the atom passing through the cavity. Its effect on the field is char-acterized by a correlation function of the form: We remark that, in this case, the correlation function is independent of the state of the qubit. We will rewrite the correlation function in terms of the vari- (t, t ) satisfies the usual assumptions of the Born-Markov approximation scheme (stationnarity and fast decay), the operators appearing in the Born-Markov equation are of the form+ ∞ 0 G (a) (t, ) e −i d N. From there follows a Lindblad equation with the jump operator L n () = 2G (a) (−)N. Unfortunately, it is not possible to satisfy those requirement for the function (t) (indeed, stationarity implies that should be a phase but, being real, it must be a constant which cannot satisfy the decay requirement). Thus, the time dependence must be kept in full generality in this model, breaking the usual stationarity assumption. This is not a problem for the derivation of a master equation of the Lindblad form, the only difference being that the rates will be time dependent. 
Still making the assumption of fast decay in the τ time coordinate, the usual steps of the derivation can be followed. We then end up with a time-dependent Lindblad term proportional to the operator N: L_n(t, ω) = √(2G^(a)(t, −ω)) N. A sufficient condition for the fast decay assumption to be valid is to have an exponential decay. This is the case in the Rydberg atoms experiment, where the coupling constant follows the Gaussian beam in the cavity. If we send a stream of atoms, we still have to make sure that the assumption of fast decay in τ is valid. This implies that the atoms must be sent as a group to have the exponential decay of the correlation function. The second decoherence channel comes from the leaky cavity and does not present any analytical difficulties. Supposing that those degrees of freedom are at equilibrium and at zero temperature, the Born-Markov equation then reduces to a Lindblad term with the jump operator L_a(ω) = √(2G^(c)(−ω)) a, with G^(c) the correlation function of the environment field modes at zero temperature. Before going to the main analysis of the model, two remarks have to be made. The first remark is that if we were to prepare the experimental setup in the resonant regime, the atom-field interaction is modified in such a way that the atom can emit or absorb a photon from the mode. Staying in the Markovian regime would result in a dissipative dynamics equivalent to photon emission and absorption, albeit with very different transition rates. Thus, running the experiment in the resonant regime will not result in the dynamics we are interested in. The second one refers to the system-environment cut and the Markovian hypothesis. Here, we chose to consider as the system only the field mode, all the other degrees of freedom then being part of the Markovian environment. However, as it was done in, it is also possible to consider as the system the field and the qubit, while all the other electromagnetic field modes form the Markovian environment. Different decoherence channels acting on the qubit and/or on the field can be considered, while the non-trivial internal Jaynes-Cummings dynamics adds another level of complexity. A. Position of the problem The previous discussion shows that it is possible in principle to engineer a complex open quantum dynamics with two incompatible decoherence channels. Building on this motivating example, the problem we are interested in is to understand the einselection process of a field mode modeled by a simple harmonic oscillator subject to two decoherence channels probing two incompatible observables on the field. Abstracting ourselves from the CQED context, we consider from now on an open quantum dynamics modeled by a Lindblad equation with two time-independent jump operators: L_a = √γ_a a and L_n = √γ_n a†a = √γ_n N. Those two operators do not commute with each other and therefore can't be simultaneously diagonalised. The incompatibility between decoherence channels we are referring to has to be understood in this sense. Still, their commutator, [L_n, L_a] ∝ L_a, remains simple enough and this is the key to finding an exact solution. Given the two quantum jumps L_a and L_n, the Lindblad equation we want to analyze is dρ/dt = −iω_c[N, ρ] + γ_a(a ρ a† − ½{a†a, ρ}) + γ_n(N ρ N − ½{N², ρ}), where N = a†a is the number operator and ω_c the frequency of the field mode. To gain some intuition on the physics behind this dynamics, let's study the evolution of some average values.
We will focus on the average position in phase space, which can be recovered directly from the average value of the annihilation operator ⟨a⟩, as well as the variance around this position, which is related to the average photon number ⟨N⟩ and the average of the square of the annihilation operator ⟨a²⟩. From those values, we can extract the average position and the fluctuations along an arbitrary axis x_θ. The observable N is in this case of particular interest. First of all, it gives access to the average energy of the cavity. Second, it is not affected by the proper dynamics of the cavity nor by the L_n jumps. A direct computation gives the standard exponential decay of a damped harmonic oscillator with rate γ_a: ⟨N⟩(t) = ⟨N⟩(0) e^{−γ_a t}. This solution gives the intuitive result that photon loss induces a decrease of the average number of photons in the cavity. On the contrary, both decoherence channels affect the average values of the annihilation operator and its square: ⟨a⟩(t) = ⟨a⟩(0) e^{−iω_c t} e^{−(γ_a + γ_n)t/2} and ⟨a²⟩(t) = ⟨a²⟩(0) e^{−2iω_c t} e^{−(γ_a + 2γ_n)t}. We see that the average position in phase space oscillates at the frequency of the cavity and is exponentially suppressed at a rate (γ_a + γ_n)/2, different from the decay rate of the average energy. We can thus expect that the dynamics will lead to some increase in the fluctuations at intermediate timescales, when the cavity is not empty. To put this statement on firmer ground, it is instructive to compute the average fluctuations along an axis x_θ making an angle θ with the horizontal in phase space. A direct computation gives ∆x_θ² = 1/2 + (⟨N⟩ − |⟨a⟩|²) + Re[(⟨a²⟩ − ⟨a⟩²) e^{−2iθ}]. The first term 1/2 remains even when the cavity is empty and corresponds to the fluctuations of the vacuum. It is also the minimal isotropic fluctuation that satisfies the Heisenberg principle. The second term corresponds to isotropic fluctuations above the vacuum state and is, for instance, zero for coherent states but not for a thermal density matrix. Finally, the last term is the anisotropic part of the fluctuations and can be either positive or negative. Even though the overall fluctuations must satisfy Heisenberg inequalities, states can exhibit fluctuations below the vacuum ones in some directions. This property is called squeezing and is of particular interest for quantum technologies. If the initial state at t = 0 is a Fock state |n_0⟩, only the isotropic contribution ⟨N⟩(t) = n_0 e^{−γ_a t} survives. In this case, the fluctuations are decreased solely because of the loss of energy in the cavity. This is due to the fact that the L_a channel does not create any coherences between different Fock states, while the L_n channel does not affect statistical mixtures of Fock states. However, the situation is more subtle if we prepare a coherent state. A coherent state |α⟩ such that α = √n_0 e^{iφ} gives the values ⟨N⟩ = n_0 and ⟨a²⟩ = n_0 e^{2iφ}. The fluctuations for such a state evolve according to ∆x_θ²(t) = 1/2 + n_0 e^{−γ_a t}(1 − e^{−γ_n t})[1 − e^{−γ_n t} cos(2(θ − φ + ω_c t))]. As in the Fock state case, the fluctuations are exponentially suppressed in time as the cavity loses energy with a rate γ_a. However, the situation is more interesting because of the competition between both channels on the isotropic part of the fluctuations: on top of the exponential suppression, the L_n channel tends to increase fluctuations up to the maximum possible for the average energy still stored in the cavity. As we will see later, this comes from the fact that the coherences between different photon numbers are suppressed over a timescale 1/γ_n. The anisotropic part of the fluctuations, in this case, is always smaller than the isotropic part over the vacuum.
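As a sanity check on these decay rates, they can be verified numerically with QuTiP's master-equation solver. The sketch below is illustrative only: the truncation dimension, rates and initial amplitude are assumptions of the example, not values from the text.

import numpy as np
import qutip as qt

dim, wc, ga, gn, n0 = 40, 1.0, 0.1, 0.3, 4.0  # illustrative values only
a = qt.destroy(dim)
H = wc * a.dag() * a
c_ops = [np.sqrt(ga) * a,             # photon-loss jump L_a
         np.sqrt(gn) * a.dag() * a]   # dephasing jump L_n

psi0 = qt.coherent(dim, np.sqrt(n0))  # coherent state with phi = 0
tlist = np.linspace(0.0, 10.0, 100)
res = qt.mesolve(H, psi0, tlist, c_ops, e_ops=[a.dag() * a, a])

N_t, a_t = res.expect
# <N>(t) decays at rate ga; |<a>|(t) decays at rate (ga + gn)/2.
assert np.allclose(N_t, n0 * np.exp(-ga * tlist), atol=1e-4)
assert np.allclose(np.abs(a_t), np.sqrt(n0) * np.exp(-(ga + gn) * tlist / 2), atol=1e-4)

The fluctuations ∆x_θ² can be assembled from ⟨N⟩, ⟨a⟩ and ⟨a²⟩ in the same way by adding a*a to the list of expectation operators.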
The fluctuations will thus always stay above the vacuum fluctuations in any direction: this dynamics does not exhibit squeezing. At short times, however, the fluctuations behave as Δx_θ²(t) ≃ ½ + 2 n_0 γ_n t sin²(θ + ω_c t − φ). They stay identical along the axis on which the sine vanishes, but are increased at a rate of order n_0 γ_n in the orthogonal direction. At longer times, the anisotropic fluctuations decay with an exponential rate (γ_a + γ_n), faster than the isotropic decay rate γ_a. Those timescales are very important to properly follow the einselection process and we will analyze them in detail after having obtained the general solution.

B. Quantum trajectory approach

The Lindblad equation can be solved exactly with the quantum trajectory approach. One of the motivations for using this approach is that it is now possible to explore the dynamics of well-controlled quantum systems at the level of a single experimental realization. In this sense, it is closer to the latest experiments studying decoherence. The strategy will be to first write the stochastic Schrödinger equation for the system, properly parametrize a trajectory, write its relative state and, finally, average over all possible trajectories to obtain the reduced density matrix. The stochastic Schrödinger equation associated to the Lindblad Eq. can be written in the standard jump-unravelling form, where ε(t) takes the values 0, 1 and 2 if there is, respectively, no quantum jump, a jump L_n or a jump L_a. A trajectory is then parametrized by the type of the jumps and their occurrence times. We stress that the state |ψ_c(t, ε)⟩ is not normalized, the probability of the trajectory t ↦ ε(t) being given by its squared norm.

To have a better intuition of how a state evolves, let's start from a Fock state |m⟩. Since it is an eigenstate of a†a, the trajectories ε = 0 and ε = 1 do not change the state and only induce a phase. However, the jump ε = 2 induces a photon loss and the state is changed into |m − 1⟩, again up to a phase. Thus, a proper parametrization of a trajectory is to slice the evolution according to the L_a jumps, as shown in Fig. 2. There, a trajectory is parametrized by: N_a jumps L_a, indexed by the letter k and occurring at times τ_k; and, in slice k, jumps L_n, indexed by the letter s and occurring at times t_s.

FIG. 2: Parametrization of a quantum trajectory built from two types of quantum jumps, L_a = √γ_a a and L_n = √γ_n a†a. The times τ_k correspond to the L_a jumps and the quantum state between each jump is written above each slice.

For a given slice [τ_k, τ_{k+1}], where τ_{N_a+1} = t and τ_0 = 0, the relative state obtained from Eq. is proportional to the Fock state |m − k⟩, with a proportionality constant λ(t, ε) that can be computed explicitly.

Having the formal expression of the state of the system relative to a given quantum trajectory, we can obtain the reduced density matrix by summing over all possible trajectories. Our parametrization is such that we can easily resum all the phases accumulated between the L_a jumps and then average over all L_a jump events. For the Fock state |m⟩, only the diagonal elements of the reduced density matrix can be non-zero. No coherences are induced, since a Fock state remains a Fock state and no superposition appears. This immensely simplifying feature comes directly from the special commutation relation of the jump operators. We obtain:

ρ(t) = Σ_{k=0}^{m} C(m, k) (1 − e^(−γ_a t))^k e^(−γ_a (m−k) t) |m − k⟩⟨m − k|,

where C(m, k) denotes the binomial coefficient. Starting from a Fock state |m⟩, the state |m − k⟩ is reached if k photons have leaked. The associated probability is given by the prefactor above, which is the classical probability of a binomial experiment B(m, p), where the probability p for a photon to leak between 0 and t is p = 1 − e^(−γ_a t).
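The trajectory picture itself can be illustrated with a Monte Carlo unravelling. The sketch below relies on QuTiP's mcsolve (again an assumption of the example, with made-up parameters): starting from a Fock state, every single trajectory remains a Fock state, the L_n jumps only imprinting phases, while the ensemble average reproduces the binomial mixture written above.

import numpy as np
import qutip as qt

Nfock, gamma_a, gamma_n, m0 = 15, 0.1, 0.3, 8
a = qt.destroy(Nfock)
N = a.dag() * a

c_ops = [np.sqrt(gamma_a) * a, np.sqrt(gamma_n) * N]
times = np.linspace(0.0, 10.0, 100)

# Monte Carlo unravelling: each trajectory is a pure state punctuated by jumps.
mc = qt.mcsolve(N, qt.fock(Nfock, m0), times, c_ops, e_ops=[N], ntraj=200)

# Along one trajectory <N> is an integer staircase decreasing by one at each
# L_a jump; averaged over trajectories it follows m0 * exp(-gamma_a t).
print(mc.expect[0][-1], m0 * np.exp(-gamma_a * times[-1]))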
We recover the standard evolution of a damped harmonic oscillator prepared in a Fock state. The coupling constant γ_n does not appear in this expression, which means that the dephasing induced by an atom flux has no effect on a Fock state. Indeed, we saw that L_n jumps induce a dephasing between Fock states. Since a single trajectory keeps the cavity in a single Fock state, this dephasing is only a global phase shift and thus has no effect at all. Since Fock states form a basis of the Hilbert space of the system, an analogous computation starting from any superposition of Fock states gives the general solution:

ρ_{m,n}(t) = e^(−iω_c (m−n) t) e^(−γ_n (m−n)² t/2) Σ_{k≥0} √(C(m+k, k) C(n+k, k)) (1 − e^(−γ_a t))^k e^(−γ_a (m+n) t/2) ρ_{m+k,n+k}(0).

The physical content of this general exact expression is more transparent if we initially prepare a superposition of coherent states. What's more, those are the typical quantum states that are used in cavity quantum electrodynamics to study the decoherence process. Let's then consider the state |ψ_c⟩ = (|α_+⟩ + |α_−⟩)/√𝒩, where α_± = α e^(±iθ/2) and 𝒩 is a normalization factor. The initial density matrix is then given by:

ρ(0) = (1/𝒩) [ρ(α_+) + ρ(α_−) + |α_+⟩⟨α_−| + |α_−⟩⟨α_+|],

where the notation ρ_{m,n}(α_+) refers to the matrix element mn, in the Fock basis, of the density matrix of the coherent state |α_+⟩. The last two terms correspond to interferences between the two coherent states. Using Eq., the time-evolved density matrix at time t is given by:

ρ_{m,n}(t) = (e^(−γ_n (m−n)² t/2)/𝒩) [ρ_{m,n}(α_+(t)) + ρ_{m,n}(α_−(t)) + d_a(t) ⟨m|α_+(t)⟩⟨α_−(t)|n⟩ + d̄_a(t) ⟨m|α_−(t)⟩⟨α_+(t)|n⟩],

where α_±(t) = α_± e^(−iω_c t) e^(−γ_a t/2). The effect of the two decoherence channels can be clearly identified. First, the term d_a(t) = e^(−|α|² (1 − e^(−γ_a t))(1 − e^(iθ))) is the standard decoherence factor coming from the jump L_a = √γ_a a (damped harmonic oscillator), which tends to destroy coherences between coherent states. On the other hand, the term e^(−γ_n (m−n)² t/2) tends to destroy coherences between Fock states, which are the natural pointer states of the channel L_n = √γ_n N.

The interesting and surprising aspect of Eq. is that, while the Lindblad jump operators do not commute, the overall evolution is, in a sense, decoupled. This can be made precise by using the quantum-channel perspective. Indeed, by considering the superoperators ℋ = −i(H ⊗ 1 − 1 ⊗ H̄) and 𝒟[L] = L ⊗ L̄ − ½(L†L ⊗ 1 + 1 ⊗ Lᵀ L̄) (we integrate the coupling constants into the jump operators, and the bar notation corresponds to complex conjugation), the Lindblad equation can be formally integrated as:

ρ(t) = Φ_{H,L}(t) ρ(0),   Φ_{H,L}(t) = exp[(ℋ + Σ_L 𝒟[L]) t].

The notation Φ_{H,L}(t) is here to remind us that the quantum channel depends on the set of jump operators L through the operator 𝒟. In our problem, we have two non-commuting jump operators, L_a = a and L_n = N. Astonishingly, their respective quantum channels do commute, leading to:

Φ_{H,{L_a,L_n}}(t) = Φ_{H,L_a}(t) ∘ Φ_{0,L_n}(t) = Φ_{0,L_n}(t) ∘ Φ_{H,L_a}(t).

Thus, while the generators of the open quantum dynamics do not commute, the associated quantum channels do (and decouple in this sense). The full dynamics of the model can be solved by parts by looking at the evolution of both quantum channels separately, which is an easy task. Nevertheless, this does not trivialize the einselection problem of finding the exact or approximate pointer states that entangle the least with the environment. The emergence of a classical picture does depend on the "fine structure" of the dynamics.

Two natural decoherence timescales can be defined, still considering the initial state to be the superposition of coherent states |ψ_c⟩. Associated to the L_a channel, a decoherence timescale τ_a is given by:

τ_a = 1/[γ_a n̄ (1 − cos θ)],

using that |α|² = n̄. Over time, this channel tends to empty the cavity. The relaxation timescale τ_r, deduced from the equation α(t) = α e^(−γ_a t/2), is such that:

τ_r = 2/γ_a.

A second decoherence timescale is associated to the L_n channel. In the Fock basis, its expression is straightforwardly given by 2/[γ_n (m − n)²].
Still, we can rewrite it in a form more suitable for coherent states. Indeed, coherent states have a Poissonian distribution of photons with an average n̄ = |α|² and a variance Δn² = |α|² = n̄. Thus, the characteristic width Δn of a coherent state gives a characteristic upper bound |m − n| ≲ Δn. This allows us to extract a characteristic time which, as we will see, corresponds to a spreading in the angle variable in phase space:

τ_s = 2/(γ_n n̄).

This timescale corresponds to the short-time effect of L_n. The long-time effect is given by |m − n| = 1 and corresponds to the disappearance of Fock-state superpositions. The associated timescale τ_c, which corresponds to the formation of a rotation-invariant crown in phase space, is given by:

τ_c = 2/γ_n.

Thus, the two natural decoherence timescales for coherent states are τ_a and τ_c, corresponding to the loss of coherence on the coherent-state and number-state bases respectively. The scaling τ_a/τ_c ≈ γ_n/(γ_a n̄) implies that, for sufficiently high energy, we can have a separation of decoherence times, by first seeing a decoherence over the coherent states followed by one over the number basis. However, as we will see over the next sections, this simple interpretation gets more subtle when we properly take into account relaxation and when we initially prepare Fock states.

C. Phase-space approach

The general solution

The last section presented the general solution of the Lindblad Eq. using the quantum-trajectory approach. It is also possible to solve the same problem from a phase-space perspective, using characteristic functions of the density matrix. This approach offers interesting physical insights into the dynamical evolution imposed by the two incompatible channels. Given a density operator ρ, we associate a function C_ρ(λ, λ̄), with λ ∈ ℂ, defined as:

C_ρ(λ, λ̄) = tr(ρ e^(λa†) e^(−λ̄a)).

This function is called the characteristic function adapted to the normal order. All the normally-ordered average values can be recovered from it. Now, taking each term of the Lindblad Eq., it is a straightforward computation, using the BCH formula, to rewrite it in terms of the characteristic function. By using polar coordinates λ = r e^(iθ), we obtain the transparent form:

[∂_t + (γ_a/2) r ∂_r + ω_c ∂_θ] C_ρ(r, θ) = (γ_n/2) ∂²_θ C_ρ(r, θ).

This equation is of the Fokker-Planck type, with the right-hand side being a diffusion term with a diffusion coefficient given by the coupling constant γ_n. Thus, the channel L_n tends to spread the characteristic function in angle. This is in accord with the first discussion of the evolution of fluctuations given through the average values. When only the channel L_a is present, the differential equation is first order and can be solved by the method of characteristics. This is not directly the case here, but it can be remedied by going to the Fourier domain with respect to the variable θ. Doing so is indeed quite intuitive if we remember that the conjugate observable associated to θ is the number operator N, which is the natural observable of the problem, and that both are related by a Fourier transform. Let's then define a new characteristic function C̃_ρ(r, n) = ∫ C_ρ(r, θ) e^(inθ) dθ, which is the Fourier series of C_ρ(r, θ). We then end up with an inhomogeneous first-order partial differential equation:

[∂_t + (γ_a/2) r ∂_r] C̃_ρ(r, n) = (iω_c n − γ_n n²/2) C̃_ρ(r, n).

By applying the method of characteristics, solving the equations ṙ = (γ_a/2) r and ṅ = 0, we obtain the general solution for an initial condition C̃_0(r_0, n_0):

C̃_ρ(r, n, t) = C̃_0(r e^(−γ_a t/2), n) e^((iω_c n − γ_n n²/2) t).

From the radial damping, we recover the usual damping rate leading to decoherence on the coherent-state basis.
Besides this usual behavior, we have a new damping term depending on the square of the Fock variable n, which leads to the decoherence in the Fock basis with the characteristic timescale already uncovered in Section III B.

Fock state decoherence

Different initial states can be prepared. We will naturally focus on two classes of states, Fock states |n⟩ and coherent states |α⟩. As a warm-up example, suppose that only the channel L_n is present. If we prepare a Fock state |n⟩, whose characteristic function is given by C_{|n⟩}(λ) = L_n(|λ|²), where L_n is the Laguerre polynomial of order n, we know that it will not be affected by the environment and will evolve freely, as can be readily checked from Eq.. The important remark here is that the phase-space representation of a Fock state, or of any statistical mixture of them, is rotation invariant. If we now prepare a quantum superposition of Fock states (|n_0⟩ + |n_1⟩)/√2, it is straightforward to show that the coherence part of the characteristic function, denoted C_01, evolves as:

C_01(λ, t) = C_01(λ, 0) e^(iω_c (n_0 − n_1) t) e^(−γ_n (n_0 − n_1)² t/2).

The coherence terms between Fock states are damped by an exponential factor with a characteristic timescale 2/[γ_n (n_0 − n_1)²], scaling as the inverse square of the "distance" between the two components of the state. This is the expected decoherence dynamics of the exact pointer states of a decoherence channel.

Coherent states and the Wigner representation

In the spirit of cavity quantum electrodynamics experiments, it is more appropriate to study the evolution of coherent states and of their superpositions. Before discussing the dynamics of such states, it is necessary to introduce a phase-space representation better suited than the one defined by Eq.. Indeed, this latter function is a complex-valued function, which is not the best choice for representation purposes. From the characteristic function C_ρ(λ, λ̄), we can recover an equivalent, real-valued phase-space representation called the Wigner function W_ρ(λ) of the state ρ (with the parameter λ ∈ ℂ), defined as the following integral transform:

W_ρ(λ) = (1/π²) ∫ C_ρ(ξ, ξ̄) e^(−|ξ|²/2) e^(λξ̄ − λ̄ξ) d²ξ.

Using the position and momentum coordinates in phase space, λ = x + ip, we recover the common expression:

W_ρ(x, p) = (1/π) ∫ ⟨x + y|ρ|x − y⟩ e^(−2ipy) dy.

The Wigner function is used in a wider context than quantum optics and possesses a nice set of properties to represent quantum interferences in a transparent way. We will use it throughout this paper to represent our analytical and numerical results. Its expression in terms of the density matrix in the Fock basis can be found in Appendix A. Furthermore, since the proper dynamics of the field mode can be factored out, the dynamics is pictured without the rotation in phase space that it introduces. If we now prepare a coherent state |α⟩, whose characteristic function is given by C_{|α⟩}(λ) = e^(λᾱ − λ̄α), its evolution in the presence of only the L_n decoherence channel is given by the convolution of the initial characteristic function with a Gaussian function N_σ of the angle variable, spreading in time with a variance σ² = γ_n t (see Appendix B). In terms of the Wigner function, we have:

W(r, θ, t) = ∫ W_0(r, θ − θ′) N_σ(θ′) dθ′,

with W_0 the initial Wigner function. This form displays clearly the diffusive dynamics induced by the L_n decoherence channel in the phase angle, putting the initial intuition on firm ground. Figure 3 represents the evolution of the Wigner function of a superposition of coherent states. At a timescale τ_s = 2/(γ_n n̄), the Gaussian spots start to spread along circles with a radius given by their respective amplitudes. The same can be said for the Gaussian interference spot, which is centered at the mid-point in phase space.
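The angular diffusion can be visualized directly. The sketch below (QuTiP again, with invented couplings and amplitudes) evolves a superposition of two coherent states and renders its Wigner function; the Gaussian spots and the interference spot smear along circles of constant radius as γ_n t grows, in the spirit of Fig. 3.

import numpy as np
import qutip as qt

Nfock = 40
a = qt.destroy(Nfock)
N = a.dag() * a
c_ops = [np.sqrt(0.02) * a, np.sqrt(0.3) * N]   # gamma_a << gamma_n here

alpha = 3.0 * np.exp(1j * np.pi / 4)
cat = (qt.coherent(Nfock, alpha) + qt.coherent(Nfock, np.conj(alpha))).unit()

# H = 0 pictures the dynamics in the frame where the free rotation is factored out.
times = np.linspace(0.0, 8.0, 5)
states = qt.mesolve(0.0 * N, cat, times, c_ops).states

xvec = np.linspace(-5.0, 5.0, 201)
for rho in states:
    W = qt.wigner(rho, xvec, xvec)              # 2D array over phase space
    print(W.max())                              # contrast decreases as spots smear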
At later times t ≫ τ_c, the coherent states spread uniformly and are completely decohered into a statistical mixture of Fock states. In fact, we can better understand the structure of the Wigner function at finite times by explicitly writing the periodicity in the variable θ hidden in Eq.. Indeed, the angular Gaussian convolution can be resummed in terms of the Jacobi theta function ϑ(z; τ) = Σ_{n∈ℤ} exp(iπn²τ + 2inz). When γ_n t ≳ 1, the expansion

ϑ(θ/2; iγ_n t/(2π)) ≃ 1 + 2 Σ_{n∈ℕ*} e^(−γ_n t n²/2) cos(nθ)

can be used to approximate the Wigner function. For instance, if γ_n t = 2, only the first harmonic of the Wigner function dominates, with oscillations in amplitude of 74 % of the average value, the second harmonic being less than 2 %, as can be seen on Figs. 3 and 4. This harmonic decomposition also shows that, in the presence of symmetries in angle, for instance W(r, θ + 2π/p) = W(r, θ) for an integer p ≥ 2, the first non-zero modulation term scales as e^(−γ_n t p²/2). Thus, symmetric initial states will decohere much quicker than ones that are not. Finally, note that the presence of an interference pattern on Fig. 3 is not a signature of coherence between Fock states. For a different preparation like the one on

IV. EINSELECTION PROCESS

Now that we have a proper analytic solution of the problem and that we completely understand the evolution and characteristic timescales of each decoherence channel separately, which is summarized in Fig. 5, we can analyze the einselection process in its full generality.

A. Pointer states dynamics

Intuitively, we expect the classical picture that emerges to be dominated by the quantum channel which has the strongest coupling constant. We can in fact play with two parameters, the ratio γ_n/γ_a and the average energy n̄ of the state. One regime is when we have τ_a/τ_c ≈ γ_n/(γ_a n̄) ≫ 1, or equally n̄ ≪ γ_n/γ_a. We expect in this case that Fock-state decoherence dominates the dynamics. We can refine our statement by unraveling two subregimes with respect to the relaxation timescale τ_r: In the range τ_c ≪ τ_a, τ_r (or equally n̄ ≪ γ_n/γ_a and τ_r/τ_c ≈ γ_n/γ_a ≫ 1), we have a proper decoherence on the Fock basis, as expected. However, for τ_r ≪ τ_c ≪ τ_a (or equally n̄ ≪ γ_n/γ_a and τ_r/τ_c ≈ γ_n/γ_a ≪ 1), we end up in the almost degenerate case n̄ ≲ 1, where we relax onto the vacuum state. Thus, in the regime τ_c ≪ τ_a where the Fock decoherence dominates initially, we see that the relaxation induced by the L_a channel forbids the transition toward a proper non-trivial coherent-state classical picture: before evolving towards a proper statistical mixture of coherent states, the system relaxes to the vacuum. All in all, the emergent, physically meaningful classical picture is given by Fock states.

From the einselection perspective, a richer dynamics can be found in the complementary regime where τ_a/τ_c ≈ γ_n/(γ_a n̄) ≪ 1, or equally n̄ ≫ γ_n/γ_a. Figure 6 shows the evolution of the Wigner function of a coherent state (with initially 40 photons on average) in this situation. We can see at least three different regimes: The regime with γ_a ≫ γ_n (first column, with γ_a = 5γ_n). In this case, we see that coherent states remain largely unaffected by the environment and evolve according to the dynamics of the L_a channel with the characteristic dissipation timescale τ_r. The complementary regime with γ_n ≫ γ_a (third column, with γ_n = 5γ_a), where L_n rules the dynamical evolution. We see that the coherent state is spread into a statistical mixture of Fock states over a timescale τ_c, which is then followed by the much slower process of relaxation. The intermediate regime with γ_a ≈ γ_n.
In this case, it is not possible to clearly conclude which basis is the most classical one. The previous discussion focused mainly on the preparation of coherent states. A similar discussion can be made if we prepare a Fock state. As expected, if the L_n channel dominates, we observe a decoherence over the Fock basis with a slow relaxation towards the vacuum induced by the L_a channel. Nonetheless, some subtleties, detailed in Appendix B, occur in the opposite situation, because coherent states do not form a proper orthogonal basis. In the same way that a very close superposition of coherent states will not properly decohere under the influence of the L_a channel, a Fock state, being a continuous superposition of coherent states, cannot properly evolve toward a classical mixture of coherent states.

B. Approximate pointer states

For a general dynamics, exact pointer states, defined as states that do not get entangled with the environment if they are initially prepared, do not exist. Instead, we have to rely on an approximate notion of pointer states to give a meaningful notion of an emergent classical description. Approximate pointer states are defined as the states that entangle the least with the environment according to a given entanglement measure. This notion is the natural way to generalize the idea that classical states should be states that are robust to the action of the environment. Many different measures of entanglement exist in quantum information theory and it is not yet totally clear which one is the proper one to quantify the einselection process. For the sake of simplicity, we will consider the purity, defined as:

𝒫(ρ) = tr(ρ²).

(The purity can be used to define a notion of entropy S_L = 1 − 𝒫 called the linear entropy.) Approximate pointer states are then defined by searching for pure states that minimize the initial variation of the entropy or, in our case, maximize the purity variation. Before proceeding, we could inquire about the dependence of the approximate pointer states that we find on the entanglement witness that we choose. For instance, we could have chosen a whole class of entropies S_α, called the Rényi entropies, defined as S_α(ρ) = (1/(1−α)) ln tr(ρ^α). Using those entropies to find approximate pointer states does not change the conclusion if we are looking at the short-time evolution of a pure state. Indeed, if we prepare a pure state at t = 0, we have ρ^α = ρ at t = 0. Then Ṡ_α = (α/(1−α)) tr(ρ̇ ρ^(α−1)) = (α/(1−α)) tr(ρ̇ ρ), which is equal, apart from a proportionality factor, to the variation of the purity. The approximate pointer states thus do not depend on which measure we choose.

Coming back to the purity, its derivative 𝒫̇ = 2 tr(ρ ∂_t ρ) is linear in ∂_t ρ. We thus have 𝒫̇ = 𝒫̇_a + 𝒫̇_n, where 𝒫̇_a (resp. 𝒫̇_n) is the contribution solely due to the L_a (resp. L_n) channel. First of all, since each of these contributions is non-positive, a pure state will stay pure if and only if it stays pure for each channel. In this case, it is easy to see that, unless one of the two constants γ_a or γ_n is zero, the only state staying pure is the vacuum state. That's why, in general, no exact pointer states exist for a complex open quantum dynamics and we have to look for approximate ones. Having said that, we now have to search for pure states that maximize the evolution of the purity at the initial time. As functions of the matrix elements ρ_{m,n} of the initial state written in the Fock basis, the purity variations satisfy:

𝒫̇_a = −γ_a Σ_{m,n} [ |ρ_{m,n}|² (m + n) − 2 ρ̄_{m,n} ρ_{m+1,n+1} √((m+1)(n+1)) ],
𝒫̇_n = −γ_n Σ_{m,n} |ρ_{m,n}|² (m − n)².

To find the approximate pointer states, we thus have to find the initial pure state that minimizes the absolute purity variation, under the constraint of a unit norm.
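Since 𝒫̇_a and 𝒫̇_n are explicit quadratic forms in the matrix elements, this constrained optimization is easy to set up numerically. The sketch below computes the initial purity variation 𝒫̇ = 2 tr(ρ ℒ[ρ]) with QuTiP and compares a Fock state against a small perturbation of it; the library, the parameters and the trial perturbation are all assumptions of this example, chosen to anticipate the critical coupling derived below.

import numpy as np
import qutip as qt

Nfock = 30

def purity_rate(psi, gamma_a, gamma_n):
    """Initial purity variation 2 tr(rho * drho/dt) for a pure state psi."""
    a = qt.destroy(Nfock)
    L = qt.liouvillian(qt.qzero(Nfock),
                       [np.sqrt(gamma_a) * a, np.sqrt(gamma_n) * a.dag() * a])
    rho = qt.ket2dm(psi)
    rho_dot = qt.vector_to_operator(L * qt.operator_to_vector(rho))
    return 2.0 * (rho * rho_dot).tr().real

n0, gamma_a, eps = 5, 1.0, 1e-3
crit = n0 + np.sqrt(n0 * (n0 + 1)) + 0.5       # critical ratio derived below
fock = qt.fock(Nfock, n0)
# Perturbation in the (n0-1, n0+1) plane with phase difference 0, which keeps
# the average energy fixed to second order in eps.
pert = (fock + eps * (qt.fock(Nfock, n0 - 1) + qt.fock(Nfock, n0 + 1))).unit()

for gamma_n in (0.8 * crit, 1.2 * crit):       # below and above the transition
    diff = purity_rate(pert, gamma_a, gamma_n) - purity_rate(fock, gamma_a, gamma_n)
    print(gamma_n / gamma_a, diff)             # sign flips across the critical ratio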
It is also natural to fix the average energy of the wavepacket (otherwise, the vacuum is a trivial optimum). Since it is difficult to perform this optimization analytically, we performed it numerically using the Pagmo library with the SNOPT algorithm. We see on Fig. 7 that, when the average energy corresponds to an integer number of photons, the optimal state deforms from a coherent state to a Fock state. The optimal purity variation 𝒫̇_opt as a function of (γ_a, γ_n) can be seen for different numbers of photons on Fig. 8. The time evolution of the approximate pointer states is discussed in Appendix D.

Furthermore, a remarkable fact can be seen by looking at the overlap between the Fock state of energy n_0 and the optimal pure state |ψ_opt⟩: Fock states remain the approximate pointer states even in the presence of the L_a decoherence channel at small coupling γ_a ≪ γ_n. This is characterized by the presence of a plateau on the overlap |⟨n_0|ψ_opt⟩| as a function of γ_a/(γ_a + γ_n), as shown on Fig. 8. To better understand this phenomenon, it is instructive to look at the evolution of the purity for a state not far away from the Fock state |n_0⟩ and to see how this small perturbation changes the optimization problem. Introducing the coefficients c_n = δ_{n,n_0} + ε_n, with |ε_n| ≪ 1, we have:

𝒫̇_a = −2γ_a [ n_0 (1 + 2ε_{n_0})² + Σ_n |ε_n|² (n + n_0) ].

FIG. 8: In the low-γ_a regime, the approximate pointer state |ψ_opt⟩ is still a Fock state, until the critical value γ_n/γ_a = n_0 + √(n_0(n_0 + 1)) + 1/2. As expected, the variation of purity decreases linearly on this plateau.

The norm and energy constraints can be rewritten as the equation system:

2 Re(ε_{n_0}) + Σ_n |ε_n|² = 0,   (34a)
Σ_n (n − n_0) |ε_n|² = 0.         (34b)

Using the global phase symmetry of the quantum state, we can fix ε_{n_0} to be real (an imaginary part of ε_{n_0} corresponds to changing the phase of the initial Fock state). Furthermore, using both constraints, we can eliminate all the terms containing ε_{n_0}. The variation of purity takes the form:

𝒫̇/2 = −γ_a n_0 − Σ_n |ε_n|² γ_n (n − n_0)².

We see that, under the normalization constraint, Fock states are always stationary points of the purity variation. Furthermore, provided that γ_n > 4γ_a, the purity variation is a maximum along all directions except possibly in the plane (ε_{n_0−1}, ε_{n_0+1}). We can thus concentrate on this plane. In this case, the constraint Eq. (34b) is simply |ε_{n_0−1}|² = |ε_{n_0+1}|². If we denote φ the phase difference between ε_{n_0−1} and ε_{n_0+1}, the tipping point occurs when:

γ_n/γ_a = n_0 + 1/2 + √(n_0(n_0 + 1)) cos φ.

The tipping point is reached for the direction that maximizes the right-hand side, so we can keep φ = 0, and thus:

γ_n/γ_a = n_0 + √(n_0(n_0 + 1)) + 1/2.

This equation gives the critical point where Fock states are no longer the einselected states of our dynamics and explains the qualitative features of Fig. 8. Actually, what we have just shown is that a Fock state is always a stationary point of the purity variation. Given the norm and energy constraints, it moves from an extremum to a saddle point exactly when γ_n/γ_a = n_0 + √(n_0(n_0 + 1)) + 1/2, with n_0 the average energy of the state. When n_0 ≫ 1, γ_n/γ_a ≈ 2n_0 + 1. As such, when n_0 becomes bigger, the size of the plateau on which the Fock state remains the einselected state becomes smaller. Away from the critical point, the most robust state becomes the state that interpolates between a number state and a coherent state, as on Fig. 7.

V. DISCUSSIONS AND CONCLUSION

We discussed here the einselection process in the presence of two incompatible decoherence channels and used the characterization of approximate pointer states as the states that entangle the least with the environment.
By choosing an entanglement measure, they are found by solving an optimization problem on the short-time open evolution of pure states (under natural constraints). Two drawbacks of this approach to the einselection problem can be stated: how does the choice of the entanglement measure influence the answer, and does an optimal state remain robust over time (validity of the short-time hypothesis)? As we already discussed, focusing on the short-time evolution basically solves the issue of which entanglement measure to choose, but the problem remains open in general. From the exact solution of the model, we can compare the evolution over time of the purity between the optimal, the Fock and the coherent state of a given energy, as shown in Fig. 9. As expected, we observe that, at short times, the evolution of the purity is the slowest for the optimal state (by construction) while, at times very long compared to the relaxation time, everything converges toward the same value, since we are basically in the vacuum. However, at intermediate timescales, the evolution of the purity gets quite involved and we see that it may even be possible that the optimal state does not remain so; this is strongly dependent on the coupling constants. In this case, one can get an intuition of this behavior because the optimal states have a smaller spreading on the Fock basis than the coherent states and, as such, contract toward the vacuum more slowly. Thus, while the definition of approximate pointer states is physically intuitive, the question remains how to properly characterize them from an information perspective but also from a dynamical perspective.

A general question that we can also ask, in the perspective we adopted here of understanding the emergence of a classical picture from a complex environment, is the following: what can we learn about the dynamics and the einselection process of the model from its general features, like the algebraic relations between the jump operators? Naturally, the whole problem depends on the kind of dynamical approximations we make, whether we start from the exact Hamiltonian dynamics or from an approximate master equation like the Lindblad equation, as we did and still have in mind here. Focusing on the general dynamics first, we saw that, given the Lindblad equation and the commutation relations of the jump operators, the dynamics can be exactly solved algebraically by adopting a quantum-channel perspective: the full dynamics of the model decouples and we can look at the evolution of both quantum channels separately, which is an easy task. In fact, our solution can be abstracted in the following sense. Consider an open quantum dynamics with a damping channel encoded by the jump operator L and a number decoherence channel encoded by the jump operator L†L. Our approach can then be followed step by step again. It however opens the question of how to define a proper phase space associated to a jump operator (through, for instance, generalized coherent states), how to generalize characteristic functions like the Wigner representation, and whether the phase-space perspective on decoherence developed throughout our analysis still holds in this generalized context. Concerning the einselection process, however, note that having a general solution does not mean that finding the approximate pointer states is also solved. The problem remains largely open when we have many incompatible decoherence channels.
Indeed, not only does the algebraic structure influence the einselection process, but also the set of coupling constants and how they run as a function of the energy. We can start to grasp those subtleties already when a thermal environment is present (taking into account a thermal environment in our model can be done as shown in Appendix C). Indeed, absorption processes can occur, which forces us to consider a jump operator of the form L_{a†} = √(γ_a n̄) a†. However, such a jump taken alone is hardly meaningful (the state evolves toward an infinite-energy configuration). Relaxation processes have to be taken into account, and this is summed up by the usual commutation relations between a and a†. Still, this is not sufficient to reach equilibrium, and proper relations between the coupling constants of the different processes must exist. This well-known example already shows that relating the general features of the open quantum dynamics to the einselection process is not that straightforward.

The model studied in this paper behaves quite intuitively: when one of the couplings dominates, the associated decoherence channel controls the einselection process. However, when both couplings are comparable, the non-trivial commutation relations between the jump operators enter the game. No exact pointer basis exists, and the robust states interpolate between the extreme cases; how far they are from them depends, again, on the relative values of the coupling constants. Still, we also unraveled the fact that, given some constraints, the transition from one class of pointer states to another as a function of the relative values of the couplings is not smooth: below a critical value, Fock states remain exactly the most robust states. How such a behavior can be anticipated from the structure of the dynamics remains to be explored. In the end, this shows that the question of predicting general features of the emergent classical picture only from the structure of the interaction (algebraic relations between jump operators, set of coupling constants) still calls for a deeper understanding.

In summary, we solved exactly a model of decoherence for an open quantum system composed of two incompatible decoherence channels, using quantum trajectories and phase-space techniques. We then studied numerically the dynamical emergence of a classical picture. We were then able to see how the selection of approximate pointer states depends on the relative values of the coupling constants. This unraveled the remarkable robustness of Fock states relative to a decoherence on the coherent-state basis, and we were able to analyze quantitatively the critical coupling where this robustness gets lost. Our results show that the physics of decoherence and einselection still has a lot to offer when a complex dynamics is at play.

Decoherence on the coherent states basis

If we initially prepare the cavity in the Fock state |m⟩, then the L_n dynamics is trivial. As we have seen in Eq., the state inside the cavity can be described as a classical mixture of Fock states, the probability to have the state |k⟩ being given by:

P(k) = C(m, k) e^(−γ_a k t) (1 − e^(−γ_a t))^(m−k),

which is a binomial distribution of parameters m and p = e^(−γ_a t). The mean of such a distribution is mp and its variance is σ² = mp(1 − p). For small times, we have σ² ≈ m γ_a t. This timescale is comparable to the decoherence timescale τ_a we introduced in Eq. for the coherent states. We see that, in this case, it corresponds to the time it takes for the probability distribution to spread over several Fock states.
In the Wigner function, it corresponds to the attenuation of the oscillations near the origin (see Fig. 10). On the contrary, this does not correspond to a fast decoherence over coherent states. Since Fock states have no definite phase, it is natural to look for a mixture of coherent states which is uniform in its phase distribution. A natural expression for any state without phase preference would thus be:

ρ = ∫ q(n_0) |√n_0 e^(iθ)⟩⟨√n_0 e^(iθ)| dθ dn_0.

We can easily show that ρ = Σ_n [∫ q(n_0) P_{n_0}(n) dn_0] |n⟩⟨n|, where P_{n_0} is the Poisson distribution of mean n_0. The resulting distribution over Fock states thus has a variance equal to its mean. This is approximately the case for the binomial distribution only when p ≃ 0. As such, the timescale to have decoherence over coherent states from an initial Fock state is 1/γ_a, the same as the relaxation scale. This situation is actually similar to the one of coherent states with respect to the L_n dynamics: the short timescale governs the spreading in phase space, while the long timescale governs the decoherence between the Fock states. This is summarized in Table I.

In the presence of a thermal environment, the evolution equation of the characteristic function is slightly modified and becomes inhomogeneous in the r variable:

[∂_t + (γ_a/2) r ∂_r] C̃_ρ(r, n) = (iω_c n − γ_n n²/2) C̃_ρ(r, n) − γ_a n̄ r² C̃_ρ(r, n).

Its solution factorizes as C̃_ρ(r, n, t) = C̃^(T)(r, n, t) e^(−γ_n n² t/2), with C̃^(T)(r, n, t) the characteristic function obtained when the channel L_n is absent (damped harmonic oscillator at non-zero temperature).
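The balance condition mentioned above can be made concrete numerically. In the following QuTiP sketch (our own illustrative example), absorption √(γ_a n̄) a† is paired with emission at the detailed-balance rate √(γ_a(n̄+1)) a, and the mode then relaxes to the thermal state of mean occupation n̄; dropping the emission term instead makes the energy grow without bound.

import numpy as np
import qutip as qt

Nfock, gamma_a, nbar = 30, 0.2, 1.5
a = qt.destroy(Nfock)

c_ops = [np.sqrt(gamma_a * (nbar + 1)) * a,   # emission into the bath
         np.sqrt(gamma_a * nbar) * a.dag()]   # absorption from the bath

rho_ss = qt.steadystate(qt.num(Nfock), c_ops)
print(qt.expect(qt.num(Nfock), rho_ss))       # ~ nbar, as detailed balance requires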
package xyz.inosurvey.inosurvey.bean;

import android.os.Parcel;
import android.os.Parcelable;

public class SurveyList implements Parcelable {

    private int id, respondentCount, respondentNumber, isCompleted, isSale;
    private String createdAt, closedAt, title, description, coin, backgroundColor;

    public SurveyList() {}

    public SurveyList(int id, String title, String description, String coin,
                      String createdAt, String closedAt, int respondentCount,
                      int respondentNumber, int isCompleted, int isSale, String bgColor) {
        this.id = id;
        this.title = title;
        this.description = description;
        this.coin = coin;
        this.createdAt = createdAt;
        this.closedAt = closedAt;
        this.respondentCount = respondentCount;
        this.respondentNumber = respondentNumber;
        this.isCompleted = isCompleted;
        this.isSale = isSale;
        this.backgroundColor = bgColor;
    }

    public SurveyList(Parcel in) {
        readFromParcel(in);
    }

    @Override
    public void writeToParcel(Parcel dest, int flags) {
        dest.writeInt(id);
        dest.writeString(title);
        dest.writeString(description);
        dest.writeString(coin);
        dest.writeString(createdAt);
        dest.writeString(closedAt);
        dest.writeInt(respondentCount);
        dest.writeInt(respondentNumber);
        dest.writeInt(isCompleted);
        dest.writeInt(isSale);
        dest.writeString(backgroundColor);
    }

    private void readFromParcel(Parcel in) {
        id = in.readInt();
        title = in.readString();
        description = in.readString();
        coin = in.readString();
        createdAt = in.readString();
        closedAt = in.readString();
        respondentCount = in.readInt();
        respondentNumber = in.readInt();
        isCompleted = in.readInt();
        isSale = in.readInt();
        backgroundColor = in.readString();
    }

    public static final Creator<SurveyList> CREATOR = new Creator<SurveyList>() {
        @Override
        public SurveyList createFromParcel(Parcel in) {
            return new SurveyList(in);
        }

        @Override
        public SurveyList[] newArray(int size) {
            return new SurveyList[size];
        }
    };

    @Override
    public int describeContents() {
        return 0;
    }

    public int getId() { return id; }

    public void setId(int id) { this.id = id; }

    public int getRespondentCount() { return respondentCount; }

    public void setRespondentCount(int respondentCount) { this.respondentCount = respondentCount; }

    public int getRespondentNumber() { return respondentNumber; }

    public void setRespondentNumber(int respondentNumber) { this.respondentNumber = respondentNumber; }

    public int getIsCompleted() { return isCompleted; }

    public void setIsCompleted(int isCompleted) { this.isCompleted = isCompleted; }

    public int getIsSale() { return isSale; }

    public void setIsSale(int isSale) { this.isSale = isSale; }

    public String getCreatedAt() { return createdAt; }

    public void setCreatedAt(String startedAt) { this.createdAt = startedAt; }

    public String getClosedAt() { return closedAt; }

    public void setClosedAt(String closedAt) { this.closedAt = closedAt; }

    public String getTitle() { return title; }

    public void setTitle(String title) { this.title = title; }

    public String getDescription() { return description; }

    public void setDescription(String description) { this.description = description; }

    public String getCoin() { return coin; }

    public void setCoin(String coin) { this.coin = coin; }

    public String getBackgroundColor() { return backgroundColor; }

    public void setBackgroundColor(String backgroundColor) { this.backgroundColor = backgroundColor; }
}
// Insert takes data and inserts it at the given offset.
func (g *GapBuffer) Insert(offset int, data []byte) {
	// Grow the gap so the payload fits, then move the gap start to the
	// insertion offset before copying the data into the gap.
	g.ensureGapCapacity(len(data))
	g.setGapStart(offset)
	copy(g.buf[g.gapStart:], data)
	g.gapStart += len(data)
}
KINETIC STUDIES OF COLOUR AND PHENOL REMOVAL FROM WASTEWATER USING MANGO SEED SHELL ACTIVATED CARBON

Kinetic studies by batch technique were carried out using activated carbon prepared from mango seed shell for the adsorption of colour and phenol from wastewater. The data generated from the studies were used for kinetic modelling in order to determine the rate and mechanism of adsorption. The results revealed that adsorption was best fitted by the pseudo-second-order model for both methylene blue (MB) and phenol adsorption. The intra-particle diffusion kinetic plots did not pass through the origin, indicating that intra-particle diffusion was not the only rate-controlling mechanism. The results showed that mango seed shell activated carbon (MSSAC) is more effective for the adsorption of colour than of phenol in wastewater.

INTRODUCTION

Adsorption is a time-dependent process and it is very important to know the rate of adsorption for design purposes. The rate and mechanism of adsorption are controlled by various factors such as the physical and/or chemical properties of the adsorbent, the ambient temperature, the pH of the medium and the nature of the adsorbate. The prediction of the adsorption rate gives important information for designing adsorption systems. Kinetic models have been used to investigate the mechanism and the rate-controlling steps of sorption, which are helpful for selecting optimum operating conditions for the full-scale batch process. Pseudo-first-order, pseudo-second-order, Elovich and intra-particle diffusion kinetic models have been used by various investigators. Among these are the adsorption of basic red by Euphorbia antiquorum L wood based activated carbon (Sivakumar and Palanisamy, 2009); the removal of Cu(II) ion from aqueous solution using sago waste as adsorbent (Maheswari et al, 2008); phenol removal from aqueous systems by tendu leaf refuse (Nagda et al, 2007); the adsorption of phenol and dye from aqueous solution using chemically modified date pits activated carbons (Belhachemi et al, 2009); and the adsorption of remazol black 5 on palm kernel shell activated carbon (Zawani et al, 2009). Others are the biosorption of zinc onto Gallus domesticus shell powder (Kalyani et al, 2009), the adsorption of methylene blue onto gulmohar plant leaf powder (Ponnusami et al, 2009) and methylene blue (MB) removal from aqueous solution by adsorption on treated sawdust (Bello et al, 2010), among others. This study is designed to determine the kinetics of adsorption of colour and phenol from wastewater using activated carbon prepared from mango seed shells as adsorbent. This will be achieved by calibrating kinetic models proposed in the literature in order to understand the mechanism of adsorption of these pollutants by mango seed shell activated carbon (MSSAC). This study is of interest because knowledge of the adsorption rate and mechanism is very important for selecting optimum operating conditions for design purposes.

THEORETICAL CONSIDERATION

Kinetics of adsorption, describing the solute uptake rate, which in turn governs the contact time of the adsorption process, is one of the important characteristics defining the efficiency of adsorption (Patil et al, 2006).
In order to investigate the mechanism of MB and phenol adsorption by mango seed shell based activated carbon (MSSAC), the following four kinetic models were considered.

Pseudo-First-Order Model

The pseudo-first-order rate expression based on solid capacity is generally expressed as follows:

dq_t/dt = k_1 (q_e - q_t)   ... (1)

where dq_t/dt is the rate of adsorption, q_e is the amount of adsorbate adsorbed at equilibrium (mg/g), q_t is the amount adsorbed at any time t (mg/g) and k_1 is the rate constant of first-order adsorption (1/min). After integration and applying the boundary conditions t = 0 to t and q_t = 0 to q_t, Equation (1) becomes:

log(q_e - q_t) = log(q_e) - (k_1/2.303) t   ... (2)

Values of the adsorption rate constant (k_1) for the adsorbate are determined from the straight-line plot of log(q_e - q_t) against t. A high value of the correlation coefficient is an indication that the removal of the adsorbate by the adsorbent follows the pseudo-first-order equation. The parameter log(q_e) is an adjustable parameter and it is often found not to be equal to the intercept of a plot of log(q_e - q_t) against t, whereas in a true first-order model log(q_e) should be equal to that intercept. Therefore, one has to find some means of extrapolating the experimental data to t = ∞, or treat q_e as an adjustable parameter to be determined by trial and error. For this reason, it is necessary to use trial and error to obtain the equilibrium sorption capacity q_e in order to analyze the first-order model kinetics (Ho and McKay, 1998).

Pseudo-Second-Order Model

The pseudo-second-order equation is also based on the sorption capacity of the solid phase. It predicts the behaviour over the whole range of data. Furthermore, it is in agreement with chemisorption being the rate-controlling step and is expressed as (Ho et al, 2000):

t/q_t = 1/h + t/q_e   ... (3)

h = k_2 q_e²   ... (4)

where h is the initial sorption rate (mg/g.min) and k_2 is the rate constant of second-order adsorption (g/mg.min); q_t, q_e and t are as defined under Equation (1). Equation (3) does not have the disadvantage of the problem of assigning an effective q_e. If pseudo-second-order kinetics are applicable, the plot of t/q_t against t of Equation (3) should give a linear relationship, from which the q_e and h values are determined from the slope and intercept of the plot respectively, and there is no need to know any parameter beforehand (Ho and McKay, 1998). k_2 is then determined from Equation (4).

Elovich Model

The Elovich equation is mainly applicable to chemisorption kinetics. The equation is often valid for systems in which the adsorbing surface is heterogeneous (Sivakumar and Palanisamy, 2009). The Elovich model is generally expressed in linear form as:

q_t = (1/β) ln(αβ) + (1/β) ln t   ... (5)

where α is the initial adsorption rate (mg/g.min) and β is the desorption constant, related to the extent of surface coverage and the activation energy for chemisorption (g/mg); q_t and t are as defined under Equation (1). A plot of q_t against ln t gives a linear trend with a slope of (1/β) and an intercept of (1/β) ln(αβ).

Intra-particle Diffusion Study

The most commonly used technique for identifying the mechanism involved in the adsorption process is the intra-particle diffusion model, as proposed by Weber and Morris:

q_t = K_d t^(1/2) + I   ... (6)

where K_d is the intra-particle diffusion rate constant and I is the intercept; q_t and t are as defined under Equation (1). If intra-particle diffusion occurs, then a plot of q_t against t^(1/2) will be linear, and the line will pass through the origin if intra-particle diffusion is the only rate-limiting parameter controlling the process. Otherwise, some other mechanism, such as external mass transfer, is also involved.
Values of I give an idea about the thickness of the boundary layer.

MATERIALS AND METHODS

100 ml test samples of pH 7 with an initial methylene blue (MB) concentration of 87 Pt-Co units were measured into 6 different 500 ml beakers. Specified adsorbent doses of 200 mg were added to each beaker and stirred at 98 revolutions per minute (rpm) using the electrically operated paddles of a flocculator (ESF 12/10 model). In the case of phenol, the carbon dose and pH used were respectively 1000 mg and 8, while the initial phenol concentration was 0.047 mg/l. Kinetic studies were performed at a constant room temperature of 34 ± 2 °C. The preparation procedure and the characteristics of the mango seed shell activated carbon (MSSAC) are reported elsewhere (Akpen, 2011; Akpen et al, 2011). Samples were taken at preset time intervals of 5, 10, 20, 30, 40, 50, 60, 90 and 120 minutes in the case of MB, and of 30, 60, 90, 120, 150 and 180 minutes in the case of phenol. The contents were allowed to settle for 2 minutes and filtered through no. 42 filter paper prior to analysis in order to minimize interference of the carbon fines with the analysis. The residual concentrations of MB and phenol were analysed according to standard methods as specified in the HACH Model DR/2000 Spectrophotometer operator's manual. The carbon particle size used was 150-850 µm in the case of MB and 150-425 µm for phenol. The same procedures were repeated for the other initial concentrations of 126, 181, 240 and 262 Pt-Co units in the case of MB and 0.069, 0.074, 0.114 and 0.123 mg/l in the case of phenol. The amount of adsorption at time t, q_t, was calculated by:

q_t = (C_0 - C_t) V / m   ... (7)

where C_0 and C_t are the liquid-phase concentrations of the pollutant initially and at any time t respectively, V is the volume of the solution and m is the mass of dry adsorbent used.

RESULTS AND DISCUSSION

The data on the kinetics of adsorption were processed to understand the dynamics of the adsorption process in terms of the order of the rate constant. A typical pseudo-first-order plot for the adsorption of MB by MSSAC is given in Fig. 1. The calculated rate constants and q_e values and the corresponding linear regression correlation coefficients, r, for all the kinetic models are presented in Tables 1-4. It is obvious from Tables 1-4 that the first-order kinetic model did not fit the experimental data well, because the experimental and calculated values of the equilibrium removal capacity q_e were different even though the correlation coefficients were high (0.829-0.990). The pseudo-first-order rate constant k_1 generally decreased with increase in initial MB concentration for all the experimental carbons except the Dausha 1:2 adsorbent. For the local 1:2 adsorbent, k_1 decreased from 0.154 to 0.039 min⁻¹ when the initial MB concentration was increased from 87 to 262 Pt-Co units. A typical pseudo-first-order plot for the adsorption of phenol by MSSAC is given in Fig. 2. The corresponding parameters and coefficients are shown in Table 5. The correlation coefficients (r) ranged from 0.424 to 0.750. Just as in the case of MB adsorption, the first-order kinetic model did not fit the experimental data well because the experimental and calculated values of the equilibrium removal capacity q_e were significantly different. Besides, the low r values suggest that the adsorption data fitted poorly to pseudo-first-order kinetics. Thus, the adsorption of phenol onto MSSAC does not follow the pseudo-first-order rate model (see Tables 1-4).
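The linear fits described above are straightforward to reproduce. The sketch below is illustrative only: the (t, C_t) data are made-up placeholders standing in for measured values, q_t is computed from the mass balance of Equation (7), and each model's linear form is fitted with scipy.stats.linregress; the r value of each fit is what Tables 1-5 report.

import numpy as np
from scipy.stats import linregress

# Placeholder batch data: contact time (min) and residual concentration (mg/l).
t = np.array([5.0, 10, 20, 30, 40, 50, 60, 90, 120])
Ct = np.array([0.100, 0.085, 0.066, 0.055, 0.049, 0.046, 0.044, 0.042, 0.041])
C0, V, m = 0.123, 0.100, 1.000            # mg/l, litres, grams

qt_ = (C0 - Ct) * V / m                   # Equation (7): uptake in mg/g
qe_exp = (C0 - 0.040) * V / m             # trial equilibrium capacity (mg/g)

# Pseudo-first-order, Equation (2): log10(qe - qt) = log10(qe) - (k1/2.303) t
fo = linregress(t, np.log10(qe_exp - qt_))
k1 = -2.303 * fo.slope

# Pseudo-second-order, Equations (3)-(4): t/qt = 1/h + t/qe, h = k2 qe^2
so = linregress(t, t / qt_)
qe_calc, h = 1.0 / so.slope, 1.0 / so.intercept
k2 = h / qe_calc**2

# Elovich, Equation (5): qt = (1/beta) ln(alpha beta) + (1/beta) ln t
el = linregress(np.log(t), qt_)
beta = 1.0 / el.slope
alpha = np.exp(el.intercept * beta) / beta

# Intra-particle diffusion, Equation (6): qt = Kd t^(1/2) + I
ip = linregress(np.sqrt(t), qt_)

print(fo.rvalue, so.rvalue, el.rvalue, ip.rvalue)   # compare models through r
print(k1, qe_calc, k2, h, alpha, beta, ip.slope, ip.intercept)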
Unlike the pseudo-first-order model, good correlation is observed between the experimental data and the pseudo-second-order kinetic model, with r values generally higher than 0.99. For the four variations of the mango seed shell activated carbon (MSSAC) studied, the differences between the experimental and the calculated values of the equilibrium removal capacity q_e were not significant. This suggests that the rate-limiting step in these biosorption systems may be chemisorption, involving valence forces through the sharing or exchange of electrons between the adsorbent and MB, as reported by Ho and McKay. The equilibrium sorption capacity q_e increased with increase in initial MB concentration, while the initial sorption rate h generally decreased with increase in initial MB concentration for all the experimental activated carbons studied. It can be noticed that the pseudo-second-order rate constant k_2 for Dausha 1:2 decreased from 0.0412 to 0.000743 (Pt-Co units/mg.min) with increase in initial MB concentration from 87 to 262 Pt-Co units (see Table 1). The decrease in the rate of MB removal with increasing initial MB concentration may be due to decreasing MB diffusivity as a result of the association of dye molecules to form bulky aggregates, which becomes more pronounced at high dye concentration. The correlation coefficients r ranged from 0.980 to 0.999, which is higher than the pseudo-first-order values (0.829-0.980). From these results it can be suggested that the pseudo-second-order model describes the adsorption of MB by MSSAC better than the pseudo-first-order model. Similar results were reported by Sivakumar and Palanisamy for the adsorption of basic red 29 by Euphorbia antiquorum L based activated carbon. The same behaviour was observed by Bello et al for the adsorption of methylene blue onto treated sawdust. In the case of phenol adsorption, a typical second-order plot is presented in Fig. 4, while the adsorbent parameters and correlation coefficients are shown in Table 5. The high values of the rate constant k_2 (8.75-18.79) and of the correlation coefficients r (0.997-0.999) suggest that the kinetics of phenol adsorption follows the pseudo-second-order kinetic model.

A typical Elovich plot for the adsorption of MB by MSSAC at various initial dye concentrations is given in Fig. 5. The plots are linear with good correlation coefficients (r = 0.858 to 0.997). The initial adsorption rate, α, increased, while the desorption constant, β, decreased with increase in initial dye concentration for all the experimental activated carbons (see Tables 1-4). The increase in the values of α can be attributed to an increase in the driving force for mass transfer, i.e. more MB molecules are able to reach the surface of the adsorbent in a shorter period of time.

Fig. 6 depicts a typical intra-particle kinetic plot for the four experimental carbons with respect to MB adsorption. If intra-particle diffusion occurs, then a plot of q_t against t^(1/2) will be linear, and the line will pass through the origin if intra-particle diffusion is the only rate-limiting parameter controlling the process. However, for the present study, the linear plots for a wide range of contact times do not pass through the origin. This deviation from the origin may be due to the variation of mass transfer in the initial and final stages of adsorption (Sivakumar and Palanisamy, 2009).
This indicates that although intra-particle diffusion was involved in the adsorption process, it was not the sole rate-controlling step. This also confirms that the adsorption of MB on the adsorbent was a multistep process, involving adsorption on the external surface and diffusion into the interior. The intra-particle diffusion rates, k_d, and the linear correlation coefficients are presented in Tables 1-4. Values of k_d decreased with increase in initial MB concentration and ranged from 0.48 to 4.466. In general, high k_d values illustrate a better adsorption mechanism, which is related to an improved bonding between the MB and the adsorbent particles (Demirbas et al, 2004). The kinetic data obtained for Dausha 1:3 fit the intra-particle diffusion model better, given the high correlation coefficients (r = 0.934-0.997) obtained. On the other hand, the intercept of the plots, I, reflects the thickness of the boundary layer (Abdelwahab). The larger the intercept, the greater the contribution of surface sorption to the rate-limiting step (El-Latif et al, 2010).

CONCLUSION

Kinetic studies were carried out using activated carbons prepared from mango seed shell for the adsorption of colour and phenol from wastewater. The kinetic data generated from these studies were used for kinetic modelling in order to determine the rate and mechanism of adsorption. Adsorption was best fitted by the pseudo-second-order kinetic model for both colour and phenol adsorption, with correlation coefficients r generally greater than 0.99 for all the experimental carbons studied. This suggests that the rate-limiting step may be chemisorption involving valence forces through the sharing or exchange of electrons between the adsorbent and MB. The mechanism of adsorption was rather complex and is probably a combination of external mass transfer and intra-particle diffusion, given that the linear intra-particle diffusion kinetic plots do not pass through the origin. These results revealed that MSSAC is more effective for the adsorption of colour than of phenol from wastewater.
use super::super::RenderResult;
use crate::hcore::{fs,
                   package::{Identifiable, PackageIdent}};
use handlebars::{Handlebars, Helper, HelperDef, RenderContext, RenderError};
use std::str::FromStr;

#[derive(Clone, Copy)]
pub struct PkgPathForHelper;

impl HelperDef for PkgPathForHelper {
    fn call(&self, h: &Helper<'_>, _: &Handlebars, rc: &mut RenderContext<'_>) -> RenderResult<()> {
        let param = h.param(0)
                     .and_then(|v| v.value().as_str())
                     .and_then(|v| PackageIdent::from_str(v).ok())
                     .ok_or_else(|| RenderError::new("Invalid package identifier for \"pkgPathFor\""))?;
        let deps =
            serde_json::from_value::<Vec<PackageIdent>>(rc.context().data()["pkg"]["deps"].clone())
                .unwrap();
        let target_pkg = deps.iter()
                             .find_map(|ident| {
                                 if ident.satisfies(&param) {
                                     Some(fs::pkg_install_path(&ident, Some(&*fs::FS_ROOT_PATH))
                                              .to_string_lossy()
                                              .into_owned())
                                 } else {
                                     None
                                 }
                             })
                             .unwrap_or_default();
        rc.writer.write_all(target_pkg.into_bytes().as_ref())?;
        Ok(())
    }
}

pub static PKG_PATH_FOR: PkgPathForHelper = PkgPathForHelper;
As discussed in Bauer et al (Journal of Biotechnology, 2011, in press), in the manufacture of food there is a demand for stable and well-conditioned starter, protective and probiotic cultures. One of the well-established preservation (conservation) processes used during the preparation of these cultures is freeze drying, as it is known to be a gentle drying method leading to minimal damage in micro-organisms. Freeze-drying, also called lyophilisation, is a preservation (or conservation) process whereby the material is frozen (for example into blocks, drops or pellets) at a temperature below 0° C. The surrounding pressure is then reduced to a range from 10-80 Pa (75-600 mTorr) (Bactéries lactiques De la génétique aux ferments, 2008), generally around 13-27 Pa (100-200 mTorr), and enough heat is added to allow the frozen water in the material to sublime directly from the solid phase to the gas phase. The material can be frozen in the freeze-dryer or introduced into the freeze-dryer already in frozen form. However, freeze drying is a lengthy and energy-intensive process (Knorr, 1998; Regier et al., 2004). Moreover, the survival of some bacterial strains is negatively affected by the freezing process (Meryman et al., 1977; Meryman, 2007). An alternative drying method is vacuum drying, which works at positive temperatures by applying vacuum. Using this drying method at conventional conditions (temperature range between 30 and 80° C.) may cause high losses of cells due to heat damage (Valdramidis et al., 2005). However, heat stresses can be reduced by further reducing the chamber pressure to values just above the triple point of water, which leads to low product temperatures close to 0° C. This process is referred to as Controlled Low-Temperature Vacuum Dehydration (CLTV). King et al. (1989) developed this method for the drying of sensitive food ingredients and also showed that it is applicable to the drying of micro-organisms such as Lactobacillus acidophilus (King and Su, 1993). Probiotics are well known and are used as dietary supplements. Some probiotics have been preserved by freeze-drying. It is also known that cells which are freeze-dried in the presence of protective agents are better able to maintain their viability and stability than cells which are freeze-dried without the addition of said protective agents. So, generally, a protectant is mixed with the fresh cell concentrate prior to the freeze-drying step. Freeze-drying can be performed using different techniques. In particular, freeze-drying can be performed by tray drying. In this process, the stabilized cell concentrate is loaded directly into freeze-dryer trays. The cells are frozen by contact with shelves maintained at a freezing temperature and freeze-dried in a commercial freeze-drier. The resulting cake may then be milled to make a powder, for example one which is used in probiotic blends. Another common freeze-drying process used to preserve cultures is to freeze-dry them in frozen pellet form. The frozen pellets may be formed by dripping stabilized culture onto a chilled surface (such as a chilled barrel or a chilled belt) or into liquid nitrogen. The frozen pellets can be produced and stored independently of freeze-drier availability, and can be easily loaded into freeze-drier trays. The resulting dry pellets may then be milled to make a powder, for example one which is used in probiotic blends. Probably the biggest difference between tray-drying and freeze-drying pellets is the rate of freezing.
During both freezing processes, ice crystals of pure water form, pushing the cells and dissolved solutes together into the interstitial spaces between the crystals. When freezing in liquid nitrogen the ice crystals form nearly instantaneously, while freezing in trays allows the ice crystals to grow slowly and hence to a larger size. Freeze-drying removes the ice crystals, leaving behind a matrix of interstitial spaces of now dry material. Scanning Electron Microscopy (SEM) of dried material shows that materials pelletized using standard freeze-drying processes have microscopic channels and interstitial matrices and that the cells are at, or near, the surface. On the contrary, tray-dried materials have much larger channels and interstitial matrices, and the cells are encapsulated within the matrix material, leading to a better protection of the cells. On the other hand, standard tray-drying processes are time-consuming in comparison with standard pellet freeze-drying ones, especially due to two factors: a) the slow freezing time, limited by heat transfer from the shelves to the material; b) the slow drying time, due to the longer distance needed for water to escape from the cells (the resulting cakes obtained by the tray-drying process are larger in size than the pellets). Another issue linked with tray drying is the difficulty of its logistics, e.g. the freeze-drier must be close to the fermentation and the timing of the fermentation harvest and of the drying must be synchronized. Therefore there is a need to develop an improved freeze-drying process allowing enhanced characteristics of the micro-organisms (such as a better stability). The present invention alleviates the problems of the prior art. In one aspect the present invention provides a process for the preparation of a freeze dried micro-organism composition, comprising the step of (i) subjecting a frozen composition comprising micro-organisms to a drying pressure of from 133 Pa [1000 mT] to 338 Pa [2540 mT] such that at the drying pressure the frozen composition is dried by sublimation of water present in the frozen composition to provide a freeze dried composition comprising the micro-organisms. In one aspect the present invention provides a process for the preparation of a food or feed, the process comprising (a) preparing a freeze dried micro-organism by a process comprising the step of (i) subjecting a frozen composition comprising micro-organisms to a drying pressure of from 133 Pa [1000 mT] to 338 Pa [2540 mT] such that at the drying pressure the frozen composition is dried by sublimation of water present in the frozen composition to provide a freeze dried composition comprising the micro-organisms; (b) combining the freeze dried micro-organism composition with a foodstuff or feedstuff. In one aspect the present invention provides a freeze dried micro-organism composition obtainable by a process comprising the step of (i) subjecting a frozen composition comprising micro-organisms to a drying pressure of from 133 Pa [1000 mT] to 338 Pa [2540 mT] such that at the drying pressure the frozen composition is dried by sublimation of water present in the frozen composition to provide a freeze dried composition comprising the micro-organisms.
In one aspect the present invention provides a freeze-dried micro-organism composition prepared by a process comprising the step of (i) subjecting a frozen composition comprising micro-organisms to a drying pressure of from 133 Pa [1000 mT] to 338 Pa [2540 mT] such that at the drying pressure the frozen composition is dried by sublimation of water present in the frozen composition to provide a freeze-dried composition comprising the micro-organisms.

In one aspect the present invention provides a food or feed comprising (a) a freeze-dried micro-organism composition as defined herein; and (b) a foodstuff or feedstuff.

In one aspect the present invention provides the use of drying pressure to prepare a freeze-dried micro-organism composition having improved stability and/or improved cell count and/or increased density and/or improved dispersibility, wherein a drying pressure of from 133 Pa [1000 mT] to 338 Pa [2540 mT] is applied to a frozen composition comprising micro-organisms to dry the frozen composition by sublimation of water present in the frozen composition. Aspects of the invention are defined in the appended claims.

The present invention provides novel drying techniques for the preparation of freeze-dried compositions containing micro-organisms. In particular, the present invention provides a process in which frozen compositions containing micro-organisms are freeze-dried. In this process freeze-drying is performed at pressures which are higher than those normally used for freeze-drying. The skilled person would not have expected to obtain improved micro-organism characteristics, such as stability, since it would be expected that a high pressure would be damaging to the micro-organisms. An advantage of the present invention is that this process (i.e. the use of a high pressure for freeze-drying micro-organisms) may be implemented in different drying techniques such as pellet drying and tray drying, leading to improved results. In comparison with dried pellets obtained using standard freeze-drying techniques (such processes using a pressure of 100 mT), the cell counts, shelf stability, density and dispersibility of the freeze-dried micro-organisms are enhanced. In comparison with a standard tray-drying process, it has been found that the cell counts and the cell stability of the compositions freeze-dried in accordance with the present invention surpass those obtained for commercially tray-dried material. Furthermore, the bulk density of freeze-dried compositions in accordance with the present invention after milling is equivalent to the best tray-dried products. Moreover, scanning electron microscopy (SEM) images indicate that cells of micro-organisms in the present compositions are encapsulated in a matrix; it is understood that this translates into their better stability. For ease of reference, these and further aspects of the present invention are now discussed under appropriate section headings. However, the teachings under each section are not necessarily limited to each particular section.
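The bracketed unit equivalences quoted above are easy to double-check, since 1 Torr = 133.322 Pa and hence 1 mTorr is roughly 0.1333 Pa. The following minimal Python sketch (ours, not part of the patent) verifies the quoted Pa/mTorr pairs:

# Unit-conversion check: 1 Torr = 133.322 Pa, so 1 mTorr ~ 0.1333 Pa.
PA_PER_MTORR = 133.322 / 1000.0

def mtorr_to_pa(mtorr: float) -> float:
    return mtorr * PA_PER_MTORR

for mtorr in (75, 100, 200, 600, 1000, 2540):
    print(f"{mtorr:>5} mTorr = {mtorr_to_pa(mtorr):6.1f} Pa")

# 1000 mTorr -> 133.3 Pa and 2540 mTorr -> 338.6 Pa, matching the claimed
# drying-pressure range of 133 Pa [1000 mT] to 338 Pa [2540 mT]; likewise
# 100-200 mTorr -> 13.3-26.7 Pa, the conventional freeze-drying range.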
// DepanNode/prod/src/com/google/devtools/depan/nodes/filters/sequence/NodeKindFilter.java
/*
 * Copyright 2016 The Depan Project Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.devtools.depan.nodes.filters.sequence;

import com.google.devtools.depan.model.Element;
import com.google.devtools.depan.model.GraphNode;

import com.google.common.collect.Lists;

import java.util.Collection;
import java.util.List;

/**
 * @author <a href="<EMAIL>"><NAME></a>
 */
public class NodeKindFilter extends BasicFilter {

  private Collection<Class<? extends Element>> nodeClasses;

  public NodeKindFilter(Collection<Class<? extends Element>> nodeClasses) {
    this.nodeClasses = nodeClasses;
  }

  @Override
  public Collection<GraphNode> computeNodes(Collection<GraphNode> nodes) {
    List<GraphNode> result = Lists.newArrayList();
    for (GraphNode node : nodes) {
      if (matchesKind(node)) {
        result.add(node);
      }
    }
    return result;
  }

  /**
   * Determine if node is an instance of one of the acceptable classes.
   *
   * @param node item to test
   * @return {@code true} iff node is an instance of an accepting class
   */
  private boolean matchesKind(GraphNode node) {
    for (Class<? extends Element> nodeClass : nodeClasses) {
      if (nodeClass.isInstance(node)) {
        return true;
      }
    }
    return false;
  }

  public Collection<Class<? extends Element>> getNodeKinds() {
    return nodeClasses;
  }
}
The combined impact of diet, physical activity, sleep and screen time on academic achievement: a prospective study of elementary school students in Nova Scotia, Canada

Background Few studies have investigated the independent associations of lifestyle behaviors (diet, physical activity, sleep, and screen time) and body weight status with academic achievement. Even fewer have investigated the combined effect of these behaviors on academic achievement. We hypothesize that the combined effect of these behaviors will have a higher impact on academic achievement than any behavior alone, or than that of body weight status.

Methods In 2011, 4253 grade 5 (10-11 years old) students and their parents were surveyed about the child's diet, physical activity, screen time and sleep. Students' heights and weights were measured by research assistants. Academic achievement was measured using provincial standardized exams in mathematics, reading and writing, and was expressed as meeting or not meeting expectations as per standardized criteria. Exams were written 1 year following the measurement of lifestyle behaviors. Lifestyle behaviors were measured with self- and parental proxy reports and expressed as meeting recommendations (yes/no) for each behavior. Mixed effects logistic regression models adjusting for demographic confounders and caloric intake were used to determine the independent and combined associations.

Results Meeting dietary recommendations was associated with an increased likelihood of meeting academic expectations for each of math, reading and writing. Meeting recommendations for screen time and sleep was associated with meeting expectations for writing. For all three subjects, meeting additional lifestyle behavior recommendations was associated with a higher likelihood of meeting expectations. Children who met 7-9 lifestyle behavior recommendations had more than three times the odds of meeting expectations for reading compared to those who met 0-3 recommendations (OR: 3.07, 95% CI: 2.09, 4.51), and 1.47 and 2.77 times the odds of meeting expectations in mathematics and writing, respectively. Body weight status was not associated with academic achievement.

Conclusions We found that lifestyle behaviors, not body weight status, are strongly associated with student academic performance. Promoting compliance with established healthy lifestyle recommendations could improve both the health and educational outcomes of school-aged children. School-based health promotion initiatives that target multiple lifestyle behaviors may have a greater effect on academic achievement than those that focus on a single behavior.

Electronic supplementary material The online version of this article (doi:10.1186/s12966-017-0476-0) contains supplementary material, which is available to authorized users.

Background It is established that academic success in childhood and adolescence is a strong predictor of future wealth, productivity and health. Given this, children's academic achievement must be taken into consideration by public health decision makers aiming to prevent chronic diseases and improve health across the lifespan. This includes not only the resources devoted to educational attainment, but also an understanding of the indirect lifestyle factors that help to shape childhood and adolescent academic success.
Healthy diets, sufficient physical activity and sleep, and minimal screen time contribute to a healthy lifestyle, are important to children's cognitive performance during development, and may potentially optimize academic success. Findings from studies investigating the relationship between individual lifestyle behaviors and academic achievement have demonstrated that children with healthy lifestyle behaviors perform better academically [10,. In particular, reductions in children's time spent in physical education have motivated substantial study of the relationship between physical activity and academic achievement. However, few studies have investigated the independent association of multiple lifestyle behaviors with academic achievement; in particular, the potential for confounding in the observed relationships, given the strong correlation between lifestyle factors, has received little attention. For example, levels of sleep are associated with academic achievement, yet screen time has been shown to be associated with both sleep and academic achievement. If screen time is not taken into consideration when evaluating the association between sleep and academic achievement, the observed relationship may, in part, be attributable to the effects of screen time. Some scholars have hypothesized that healthy lifestyle behaviors do not act in isolation in their relationship with academic achievement, and that the effects of exhibiting multiple healthy lifestyle behaviors may be greater than the sum of their individual effects. To our knowledge, only two studies have investigated this hypothesis, and both have found convincing evidence in support of it. Using self-reported health data in a cohort of Spanish adolescents, Martinez-Gomez et al. found that meeting recommendations for 3-4 lifestyle behaviors was associated with higher odds of achieving passing grades in Language and Literature and Math, in girls only, compared to those that met fewer recommendations. In a sample of American children in a low-income urban district, Ickovics et al. found that children exhibiting the largest number of healthy lifestyle behaviors, including a healthy body weight, were more than twice as likely to meet goals on standardized exams compared to those with the fewest. Further investigation of this hypothesis is merited to inform effective health promotion in children, particularly school-based initiatives. Many health promotion initiatives based in schools have focused on singular components of a healthy lifestyle; addressing multiple behaviors simultaneously may produce cumulative benefits that impact both health and academic outcomes. Our aim is to investigate the independent and combined effects of lifestyle behaviors, as well as body weight status, on children's academic achievement on standardized exams using a large, population-based sample of grade 5 students from Nova Scotia, Canada. We aim to complement and expand on previous work by using a large, population-based sample as well as by including a wide range of dietary components, sleep duration and adherence to established recommendations for sleep, objectively measured heights and weights, and standardized exam results in addition to measures of physical activity and screen time.
We evaluate children's lifestyle behaviors by their adherence to established health recommendations from Health Canada, the United States Departments of Health and Human Services and Agriculture, and the World Health Organization (WHO) for each behavior in order to improve the interpretability and applicability of results for public health decision makers. We hypothesize that each lifestyle behavior of interest (diet, physical activity, screen time and sleep) will have an independent effect on academic achievement. We also hypothesize that the combined effect of multiple healthy lifestyle behaviors will have a greater effect on academic achievement than their respective individual effects.

Methods The 2011 Children's Lifestyle And School performance Study (CLASS) is a population-based survey examining lifestyle behaviors, weight status, and academic achievement of grade 5 (mostly 10-11 years old) students in Nova Scotia, Canada. All grade 5 students in Nova Scotia, their parents or guardians, and school administrators were invited to participate in the study. Of all schools that had grade 5 classes in the province, principals in 269 of 286 (94.1%) schools provided consent for participation. Following consent from the principal, home packages, including consent forms, were sent home to all parents and guardians of grade 5 children in the school. Parental consent to participate in the survey was provided for 6591 students out of the 8736 packages distributed, resulting in an average response rate of 75.4% per school. Of these, 1169 (17.7%) were absent the day of the survey, did not complete the survey, or had caloric intakes <500 or >5000 kcal (values considered unrealistic), and as such were excluded from analysis, leaving 5422 eligible students. Of the remaining students, 4253 (78%) could be successfully linked with their achievement on grade 6 standardized exams in Reading, Writing and Mathematics that were written one full year following the lifestyle behavior assessment; the resulting overall completion rate was 64.5%. Further information and the survey used can be found at http://www.nsclass.ca.

Data collection Trained research assistants travelled to participating schools to administer surveys during classroom time. Students completed two surveys. The student survey contained questions about habitual physical activity and personal perceptions about diet. Students also completed the Harvard Food Frequency Questionnaire for Youth/Adolescents (YAQ), a 147-item validated questionnaire adapted for Canadian use that measures habitual intake over the past 12 months. Research assistants measured students' heights to the nearest 0.1 cm and weights to the nearest 0.1 g using calibrated stadiometers and scales as per standard protocol. Parents completed a home survey reporting on children's sleep habits and screen time usage, as well as questions on household income and parental level of education.

Exposures

Diet We employed the Harvard Food Frequency Questionnaire YAQ to evaluate students' dietary consumption. The YAQ contains 147 questions about the frequency of consuming items over the past year, 135 of which concern specific food items and 11 of which relate to food habits (eating in front of the TV, etc.). Nutrient information was derived using the Canadian Nutrient File (CNF), a Canadian nutrient composition database for commonly consumed foods in Canada.
Students' consumption was evaluated with respect to meeting age-specific recommendations from Health Canada's Eating Well with Canada's Food Guide. This includes recommendations for daily servings of vegetables and fruit (6 servings), grain products (6 servings), milk and alternatives (3-4 servings) and meat and alternatives (2 servings). The Canadian Food Guide does not have specific recommendations for saturated fat and free sugar intake. Instead, we used recommendations developed for American youth for saturated fat intake (<10% of total energy intake), and for free sugars consumption (representing sugars that are added during food processing, not naturally occurring sugars such as lactose) we followed the 2015 World Health Organization recommendation (<10% of total energy intake). We calculated energy intake using responses from the YAQ.

Physical activity The student survey contained the Physical Activity Questionnaire for Children (PAQ-C), a self-administered, 10-item physical activity recall instrument. The questionnaire has been validated to measure general levels of moderate-to-vigorous physical activity in children aged 8-14. A score between 0 and 5 was calculated from responses for each student, with higher scores indicating higher levels of physical activity. Cutoff values indicating 'healthy fit' or 'at-risk' in regards to cardio-respiratory fitness for the PAQ-C have been established for children: 2.7 for girls and 2.9 for boys. As such, meeting recommendations for physical activity that correspond with healthy cardio-respiratory fitness was assessed using these cutoff values.

Screen time Parents were asked: "On average, about how many hours per day does your Grade 5 child spend watching TV, not including school hours?" Possible responses were: less than 1 h a day, 1-2 h per day, 3-4 h per day, or 5 or more hours per day. Children were described as meeting sedentary behavior recommendations if total screen time from television watching was less than 2 h per day, as per the Canadian Sedentary Behavior Guidelines.

Sleep Parents reported habitual wake-up and bed times for children on usual weekdays and weekends. Parents were asked: "At what time does your child usually wake up during a) the week (Monday to Friday) and b) the weekend (Saturday and Sunday)?" Possible responses were before 6:30 am, 6:30-7:00 am, 7:00-7:30 am, 7:30-8:00 am, 8:00-8:30 am, 8:30-9:00 am and after 9:00 am. Parents were also asked: "At what time does your child usually go to bed during a) the week (Sunday to Thursday) and b) the weekend (Friday and Saturday)?" Possible responses were before 8:00 pm, 8:00-8:30 pm, 8:30-9:00 pm, 9:00-9:30 pm, 9:30-10:00 pm, 10:00-10:30 pm, and after 10:30 pm. Sleep duration was calculated from usual bed and wake-up times, with usual time to fall asleep subtracted, for each of weekday and weekend days. Average nightly sleep duration for a typical week was calculated from the mean sleep duration of five weekdays and two weekend days. Students were described as meeting sleep duration recommendations if average duration was between 9 and 11 h, as recommended by the National Sleep Foundation.

Body weight status Children's body mass index (BMI) was calculated using measured heights and weights. Body weight status was assessed using the International Obesity Task Force (IOTF) age- and gender-specific BMI cutoffs for overweight and obesity.
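The dichotomous meeting/not-meeting flags described in this section are straightforward to derive. The Python sketch below illustrates the logic; the column names are hypothetical stand-ins, not the study's actual variables, and whether the PAQ-C cutoffs are applied inclusively is our assumption:

import pandas as pd

def derive_behavior_flags(df: pd.DataFrame) -> pd.DataFrame:
    out = pd.DataFrame(index=df.index)
    # Plausibility screen from the Methods: YAQ caloric intakes <500 or
    # >5000 kcal are treated as unrealistic and excluded upstream.
    out["plausible_intake"] = df["energy_kcal"].between(500, 5000)
    # PAQ-C cutoffs for healthy cardio-respiratory fitness:
    # 2.7 (girls), 2.9 (boys).
    cutoff = df["sex"].map({"girl": 2.7, "boy": 2.9})
    out["meets_physical_activity"] = df["paqc_score"] >= cutoff
    # Sedentary behavior guideline: less than 2 h of TV per day.
    out["meets_screen_time"] = df["tv_hours_per_day"] < 2
    # Average nightly sleep over a typical week:
    # 5 weekday nights + 2 weekend nights.
    avg_sleep = (5 * df["sleep_weekday_h"] + 2 * df["sleep_weekend_h"]) / 7
    # National Sleep Foundation range for this age group: 9-11 h.
    out["meets_sleep"] = avg_sleep.between(9, 11)
    return out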
Potential confounders Analyses were adjusted for the child's gender, parental education, and household income, as assessed by categorical questions in the parental home survey, as well as region of residence (urban or rural) determined by postal code. Energy intake was included in analyses that included YAQ data, as is recommended.

Academic achievement The Nova Scotia Department of Early Education and Childhood Development provided results for standardized provincial exams written by participants in grade 6 (spring 2012), 1 year following the measurement of the other variables (spring 2011), in the subjects of Mathematics, Reading, and Writing. Results were provided as dichotomous values of 'meeting expectations' and 'not meeting expectations'. The Nova Scotia Department of Early Education and Childhood Development, which administers the exams, provides standardized rubrics for the exams to determine if children are meeting or not meeting expectations. Teachers from across the province are invited to assist in the marking of assessments. Further information about this process can be found at https://plans.ednet.ns.ca/about-plans.

Analysis All analyses were weighted for non-response to represent provincial estimates of the grade 5 student population of Nova Scotia. Response weights were calculated based on postal-code level estimates of household data, available from Canadian census data for both participants and non-participants. As response rates were lower among the lowest income deciles, weights were applied to overcome non-response bias from lower-income neighbourhoods in Nova Scotia. We applied mixed effects models due to the clustering of students within schools. Correlations between exposures of interest were evaluated and can be found in the supplementary material (Additional file 1). Univariable logistic regression was first used to assess the associations between each individual lifestyle behavior, dichotomized into meeting and not meeting recommendations, and academic achievement. Next, we used multivariable models (Model 1) to adjust for potential confounders and body weight status. Models for individual exposures of interest were run including potential confounders (nine separate models, one for each recommendation). Another model was run with confounders only, producing the confounder results shown for Model 1 in Tables 2, 3, and 4. Finally, we considered all lifestyle behaviors simultaneously in a full model (Model 2) to assess the independent associations between meeting each lifestyle behavior recommendation, body weight status, and academic achievement. To assess the combined effects of meeting lifestyle behavior recommendations, we also considered the effect of the number of recommendations met, up to 9 (vegetables and fruit, grain products, milk and alternatives, meat and alternatives, saturated fat, free sugars, physical activity, sleep, and screen time). As with the assessment of independent associations, univariable and multivariable regression models were employed, treating the score as both categorical and continuous, to assess the cumulative impact of meeting lifestyle behavior recommendations on academic achievement. This analysis was conducted treating the score as a continuous variable and by splitting scores into three categories: low (meeting 0-3 recommendations), medium (meeting 4-6 recommendations) and high (meeting 7-9 recommendations). All analyses were conducted using Stata version 14.1 IC (StataCorp, Texas, USA).
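For readers who want a concrete picture of the modelling, the sketch below is a rough Python analogue of the analysis described above, with assumed column names. The study fit mixed-effects logistic models in Stata; as a simpler stand-in, this example fits a weighted binomial GLM with school-clustered standard errors instead of a school-level random intercept, so it is an approximation rather than a reproduction of the published models:

import statsmodels.api as sm
import statsmodels.formula.api as smf

# df: one row per student; "response_weight" approximates the
# non-response weights described in the text (column names assumed).
model = smf.glm(
    "meets_math ~ n_recommendations_met + C(gender) + C(parent_education)"
    " + C(household_income) + C(region) + energy_kcal",
    data=df,
    family=sm.families.Binomial(),
    freq_weights=df["response_weight"],
).fit(cov_type="cluster", cov_kwds={"groups": df["school_id"]})
print(model.summary())  # exponentiate coefficients to read them as ORs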
Table 1 shows that 87.4% of students met expectations for reading, 89.1% met expectations for writing, and 70.6% met expectations for mathematics. The percentage of children meeting selected lifestyle behavior recommendations was: 32.2% for vegetables and fruit, 20.8% for grain products, 56.0% for milk and alternatives, 86.4% for meat and alternatives, 54.3% for saturated fat intake, 62.6% for free sugars intake, 76.7% for physical activity as per the PAQ-C, 91.1% for sleep duration, and 77.8% for screen time. Meeting recommendations for milk and alternatives, meat and alternatives, free sugars, sleep, and screen time all had significant univariate associations with meeting expectations for mathematics (Table 2, Model 1), while vegetables and fruit, grain products, milk and alternatives, meat and alternatives, saturated fat, and free sugars all had significant univariate associations with meeting expectations for reading (Table 3, Model 1). Meeting recommendations for vegetables and fruit, meat and alternatives, free sugars, physical activity, sleep and screen time all had significant univariate associations with meeting expectations for writing. Parental level of education, household income, and gender were all significantly associated with the likelihood of meeting expectations for each subject, while obesity only had a significant univariate association with meeting expectations in mathematics.

Results After adjusting for potential confounders, meeting recommendations for milk and alternatives, meat and alternatives, and free sugars continued to have significant positive associations with meeting expectations for mathematics (Table 2, Model 1). Where meeting expectations for reading was the outcome, meeting recommendations for vegetables and fruit, grain products, milk and alternatives, meat and alternatives, saturated fat and free sugars were all associated with an increased likelihood of meeting expectations (Table 3, Model 1). Finally, meeting recommendations for meat and alternatives, free sugars, sleep, and screen time were all associated with an increased likelihood of meeting expectations for writing (Table 4, Model 1). Body weight status did not have an association with any outcome in these models. When considering all lifestyle behaviors simultaneously, only meeting recommendations for milk and alternatives, and meat and alternatives, remained significantly associated with meeting expectations for mathematics; meeting recommendations for free sugars was borderline significant. Considering the combined effect of the nine criteria, for each additional criterion met, the odds of meeting expectations for mathematics were 1.13 times higher (OR: 1.13). Table 5 shows that each additional criterion met also increased the odds of meeting expectations for reading by 1.26 times. The criteria were also considered in groups representing low, medium, and high compliance. Respectively, 7.9, 64.6, and 27.4% of children were in the low, medium and high categories. Compared to the lowest category, children who were in the highest category had 1.47 times the odds of meeting expectations for mathematics.

Discussion We observed that meeting recommendations for diet, sleep and screen time had independent, positive effects on children's academic achievement. No association was found between meeting physical activity cutoffs and academic achievement.
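As a worked example of how the per-criterion estimates compound (illustrative arithmetic only; the categorical odds ratios above come from separate models, so these compounded values need not match them exactly): moving from the midpoint of the low category (about 2 criteria met) to the midpoint of the high category (about 8) spans roughly six criteria, and an odds ratio applies multiplicatively per criterion.

# Per-criterion ORs reported above, compounded over ~6 additional criteria.
per_criterion_or = {"mathematics": 1.13, "reading": 1.26}
for subject, or_ in per_criterion_or.items():
    print(f"{subject}: OR {or_} per criterion -> {or_ ** 6:.2f} over 6 criteria")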
The findings from this study also indicated that the combined effects of meeting multiple lifestyle behavior recommendations had a stronger impact on academic achievement than the individual effects of lifestyle behaviors, particularly for reading and writing. We chose to evaluate lifestyle behaviors based on established recommendations that are widely accepted. Substantial efforts and resources go into the development and promotion of these recommendations, and findings from this study reveal that more efforts are needed to achieve compliance, not only for the benefit of health but also of education. The associations between dietary behaviors and academic achievement are supported by previous literature [6,7,. However, the majority of studies linking diet and academic achievement have tended to focus on breakfast consumption and whole diet, and few studies have evaluated the relationship between established dietary recommendations and academic achievement. In particular, no study has evaluated the relationship between academic achievement and meeting the newly released recommendations for free sugar intake, which exhibited a strong positive association with each of the three subjects. The lack of association between vegetables and fruit and academic achievement in this study seems inconsistent with much of the literature investigating the association between diet and academic achievement. In previous studies, servings of vegetables and fruit have been assessed as a continuous variable, with more servings being positively associated with academic achievement. In this study, very few children met the recommendation for vegetables and fruit. As such, there may not have been sufficient power to detect any positive effect of meeting recommendations for vegetables and fruit. We conducted additional analyses treating servings of vegetables and fruit as a continuous variable, however, and no significant effect on academic achievement was observed. Milk and alternatives, meat and alternatives, and sugars consumption were more consistent predictors of higher academic achievement. This may reflect higher-income households, which are more likely to access and purchase products within these groups regularly. The observation that meeting designated cutoffs for physical activity levels associated with adequate cardio-respiratory fitness is not significantly associated with higher academic achievement complements existing findings in the literature investigating the relationship of physical activity with children's school performance. Much of the literature has concluded that the inclusion of more physical activity and physical education relative to other subjects in a school day does not negatively affect school performance. Many studies have found a relationship between children's physical activity levels and their academic achievement or cognitive development [11,13, though few studies have aimed to investigate its importance independent of other lifestyle behaviors. Another study found the relationship between physical activity and academic achievement to be curvilinear, suggesting that children who are athletes may have many extra-curricular activities that displace time spent on academics. Though cutoff values are useful in identifying sufficient levels of physical activity for physical health benefits, they may not be the most appropriate way to assess the association between physical activity and academic achievement, as this relationship appears to be more complex.
An analysis was conducted using the PAQ-C score as a continuous variable (results not shown) and physical activity continued to have no effect on academic achievement. However, though the PAQ-C is a well-used and validated questionnaire, its intent is to provide a broad overview of children's moderate-to-vigorous physical activity levels; it does not provide detailed information about other intensities or the frequency of physical activities, or about the physiological benefits children gain from regular physical activity that would contribute to their academic achievement, among other contributors. In addition, there was a time lag (1 year) between the physical activity assessment and exam writing, which may have introduced error. The PAQ-C measures regular moderate-to-vigorous physical activity over a one-week period, which would not adequately capture the variability in the frequency of physical activity over a 1-year period. As such, this lack of association may be due to the limitations of the measure used, and further investigation using a more detailed means of measuring children's physical activity is needed. Sleep and screen time have been previously associated with academic achievement. This is the second study, to our knowledge, to demonstrate the independent importance of meeting recommendations for these behaviors for academic achievement. In this study, sleep was found to be associated only with performance on writing exams. Sleep has been shown to be crucial for creativity and insight, key determinants of strong writing skills. This study is also among the first to evaluate sleep duration and academic achievement using the National Sleep Foundation guideline. Sleep is recommended within a range of hours, not as a minimum number of hours, and the finding that meeting these sleep recommendations is strongly associated with an increased likelihood of meeting academic expectations in writing highlights the importance of both meeting the minimum requirement of sleep and not exceeding the maximum recommended number of hours. Few studies have investigated the association of body weight status and lifestyle behaviors simultaneously to determine their independent associations. Those that have provide an indication that it is not weight status that drives academic achievement, but that both academic achievement and body weight status are a result of long-term lifestyle behaviors. This study complements this important work and indicates that promoting healthy lifestyles and values, rather than focusing on obesity prevention and reduction, is most effective in supporting optimal health, holistic wellness and academic achievement [23,. Finally, the finding that the combined effects of lifestyle behaviors result in a substantially higher likelihood of meeting expectations in all subjects is an important contribution to the literature. Ickovics et al. noted similar results in a sample of low-income American children of the same age: children with higher levels of 'health assets', including indicators of healthy diet, physical activity, screen time and sleep, were 2.2 times more likely to meet goals in mathematics, reading and writing compared to those who had the fewest health assets. A Spanish study with the objective of investigating the combined effects of meeting recommendations for diet, physical activity, screen time and sleep on the self-reported grades of adolescents had similar findings for girls.
The present study complements and expands on these important findings, drawing on a large, population-based sample of children. Collectively, these studies speak to the value of school-based health promotion initiatives that are more comprehensive in their approach compared to initiatives that address only singular aspects of health. Successful models of a comprehensive school health approach that have led to improvements in the healthfulness of children's lifestyle behaviors have been well evaluated. Interventions that aim to improve multiple lifestyle behaviors may have a greater impact on academic achievement than those that focus on single behaviors. Of note is girls' strong, positive association with the likelihood of meeting expectations for reading and writing. There was no gender effect for meeting expectations in mathematics. These results are consistent with other investigations of the school performance of girls and boys. Stratified analyses by gender revealed no substantial differences in the effects of individual lifestyle behaviors on academic achievement across gender (results not shown). Children's test scores are influenced by a multitude of factors, and differential influences between girls and boys, including self-confidence and parental support for specific career streams, are among them. Further investigation is merited to determine the cause of the substantial gender differences in academic achievement in this population. This study has several key strengths. First, it is a very large, population-based sample of children who are representative of an entire Canadian province. This study evaluated lifestyle behaviors with respect to established recommendations, which allows for easier interpretation and specific targets for health promotion initiatives. Non-response weighting was employed in order to account for the non-response of students residing in lower-income neighbourhoods in Nova Scotia. This study also used results from standardized exams in several different subjects, both eliminating bias from self-reported grades and illustrating that lifestyle behaviors may have differential effects on different cognitive tasks. This study used validated questionnaires to assess diet and physical activity, as well as directly measured heights and weights. Limitations of the study include the self-reported nature of lifestyle behaviors, which can be prone to bias. The time lag between the measurement of lifestyle behaviors and the writing of standardized exams may have introduced error into estimates of association. In the case of screen time, only TV watching was captured in this analysis, and other forms of screen-based media which may be widely used were not included. In addition, the questions used to assess sleep and screen time were not validated. As the CLASS study aims to collect a wide breadth of information, shorter, non-validated questions were used on surveys to reduce participant burden. Though a validated questionnaire was used to assess physical activity levels, the PAQ-C does not provide information about the duration or intensity of physical activity, which is thought to be important in terms of its influence on academic achievement and relationship with meeting recommendations. As such, a lack of association may be due to the limited information provided by the tool used. Given the large scope of this study, objective measurements of lifestyle behaviors were not feasible.
Finally, standardized tests are only one means by which to evaluate children's academic success, and their value has been disputed. Investigations of the relationship between healthy lifestyle behaviors and other measures of academic success, including enjoyment of school, psycho-social well-being, and sense of belonging, are important complements to consider. There is also the possibility of residual confounding by unmeasured variables, including IQ and measures of mental health.

Conclusions This study demonstrates that individual lifestyle behaviors have independent, positive associations with academic achievement, and that the cumulative effects of multiple healthy lifestyle behaviors have a stronger positive association with academic outcomes in Reading and Writing than any individual association. These findings suggest that school-based health promotion approaches that address multiple lifestyle behaviors instead of single behaviors may have more benefit for academic achievement. Future studies investigating a longitudinal link between lifestyle behaviors, body weight and academic achievement are important to strengthen the prospective findings of this study and others similar to it.
Tighter NZ bank rules could drive interest rates higher, UBS says. Photo/Getty Images. The Swiss diversified financial services company said the proposed capital ratio changes for New Zealand banks would make them among the highest in the world and could lead to increases of 80 to 125 basis points in mortgage interest rates. UBS, in a research note on the Australian banking sector, said the Reserve Bank's announcement that it would sharply increase the banks' minimum capital requirements was a surprise to investors and one that may lead the big four Aussie banks, who dominate the local banking scene, to cut their dividends. "While we are firm believers in strong, well-capitalised banks, we believe the proposals by the Reserve Bank to lift New Zealand bank capital requirements to the highest in the developed world appear excessive," UBS said. "This begs the question – is New Zealand's proposed bank capital initiative worth it and what are the risks?" UBS said shareholders will expect a return on the $15 billion potential capital injection required to meet the proposed new standards. "We disagree with the Reserve Bank conclusion that 'this will only have a minor impact on borrowing rates for customers'. Equity is expensive, with a cost of capital of about 11 per cent, and we think shareholders will demand at least this return," it said. "We estimate banks will need to reprice their NZ mortgage books by about 80 basis points to 125 basis points," UBS said. UBS said the Reserve Bank's capital proposals appeared to be unnecessary and potentially damaging. "We believe the Reserve Bank's endeavours to strengthen the banks could come at a significant cost to the NZ economy as they appear to be materially underestimating the likely mortgage repricing," UBS said. "We see these proposals as expensive and unnecessary." "That said, if the Reserve Bank proceeds with its capital proposals, it may be the final catalyst for dividend cuts at the Aussie Majors." In a separate report, Craigs Investment Partners said an increase in required capital ratios would reduce the return on equity, which would affect the profitability of the New Zealand businesses. "With reduced profitability, we would expect to see New Zealand banks pull the pricing lever to offset this, and that a portion of the higher capital requirements will be borne by borrowers through higher borrowing costs," Craigs said. "We expect this would result in a reduction in system credit growth which would have implications for the housing market and overall economic growth." Australian financial services group Macquarie last month predicted mortgage rate rises of as much as 90 to 140 basis points if the four major Australian-owned banks had to raise an additional $18.9 billion. "The RBNZ paper suggests that the pricing response is likely to be immaterial," it said then. "While, theoretically, this may be conceivable, in practice we believe it is highly unlikely," Macquarie said. The Reserve Bank said in December that it would consult the market on its proposals. "Insisting that bank shareholders have a meaningful stake in their bank provides a greater incentive to ensure it is well managed," deputy governor and general manager of financial stability Geoff Bascand said then. "Having shareholders able to absorb a greater share of losses if the company fails also provides stronger protection for depositors," he said. The bank has been reviewing bank capital rules since early 2017. Possible rule changes would take place over the next five years.
Submissions on the proposals close on March 22.
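UBS's repricing estimate can be reproduced with back-of-envelope arithmetic. The sketch below uses the article's figures plus one assumption of ours, the size of the affected mortgage book, which the article does not state:

# Back-of-envelope check of the UBS repricing claim.
new_capital = 15e9        # NZ$15b capital injection cited by UBS
cost_of_equity = 0.11     # ~11% return UBS says shareholders will demand
required_revenue = new_capital * cost_of_equity   # ~NZ$1.65b per year

mortgage_book = 165e9     # hypothetical mortgage book size (our assumption)
repricing_bps = required_revenue / mortgage_book * 1e4
print(f"Implied repricing: ~{repricing_bps:.0f} basis points")

On that assumed book size the implied repricing is about 100 basis points, inside UBS's 80-125 basis point range; a larger book implies proportionally fewer basis points.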
A Small Fibronectin-mimicking Protein from Bacteria Induces Cell Spreading and Focal Adhesion Formation* Fibronectin, a 250-kDa eukaryotic extracellular matrix protein containing an RGD motif, plays crucial roles in cell-cell communication, development, tissue homeostasis, and disease development. The highly complex fibrillar fibronectin meshwork orchestrates the functions of other extracellular matrix proteins, promoting cell adhesion, migration, and intracellular signaling. Here, we demonstrate that CagL, a 26-kDa protein of the gastric pathogen and type I carcinogen Helicobacter pylori, mimics fibronectin in various cellular functions. Like fibronectin, CagL contains an RGD motif and is located on the surface of the bacterial type IV secretion pili as previously shown. CagL binds to the integrin receptor α5β1 and mediates the injection of virulence factors into host target cells. We show that purified CagL alone can directly trigger intracellular signaling pathways upon contact with mammalian cells and can complement the spreading defect of fibronectin−/− knock-out cells in vitro. During interaction with various human and mouse cell lines, CagL mimics fibronectin in triggering cell spreading, focal adhesion formation, and activation of several tyrosine kinases in an RGD-dependent manner. Among the activated factors are the nonreceptor tyrosine kinases focal adhesion kinase and Src, but also the epidermal growth factor receptor and the epidermal growth factor receptor family member Her3/ErbB3. Interestingly, fibronectin activates a similar range of tyrosine kinases but not Her3/ErbB3. These findings suggest that the bacterial protein CagL not only exhibits functional mimicry with fibronectin but is also capable of activating fibronectin-independent signaling events. We thus postulate that CagL may contribute directly to H. pylori pathogenesis by promoting aberrant signaling cross-talk within host cells.

Many cellular processes, including adhesion and migration, depend on specific interactions of cell surface receptors with extracellular matrix (ECM) proteins. The ECM is a meshwork of fibrillar and nonfibrillar components assembled into complex structures such as basement membranes. The latter provide a scaffold for cell adhesion, spreading, and migration. The ECM regulates numerous cell functions by activating multiple signaling pathways at the adhesion sites. ECMs, composed of collagens, laminins, and other glycoproteins such as fibronectin (FN), serve as substrates for different adhesion molecules including the integrin family of transmembrane receptors. The assembly of ECM components into functional supramolecular modules is highly regulated. FN matrix assembly alone is a dynamic cell-driven process in which soluble FN molecules assemble into insoluble fibrillar polymeric ECM structures. FN and integrin receptors play crucial roles in a variety of morphogenetic processes, which are regulated by processes termed outside-in and inside-out signaling cascades. Deregulation of integrin and FN functions is associated with disease development including chronic inflammation, heart failure, cancer, and metastasis (7, 9-11). The outside-in signaling triggered by ligation of integrin receptors with FN and other ECM components results in the reorganization of cytoskeletal and signaling molecules into complexes of more than 90 proteins (9-13). This occurs by synergistic processes dependent on integrin aggregation and occupancy, as well as tyrosine phosphorylation.
Integrins also cooperate with growth factor receptors such as the epidermal growth factor receptor (EGFR) to enhance signaling. FN consists of multiple domains (classified as types I-III) that show binding specificities for specific cell membrane receptors, collagen, fibrin, and heparin. FN alone is sufficient to induce highly efficient spreading of many mammalian cell types, including fibroblast and epithelial cells, in vitro. An important functional unit of FN is its RGD tripeptide motif, which acts in synergy with a PHSRN sequence for binding to integrins. In particular, the RGD motif is crucial for mediating eukaryotic cell adhesion and spreading. Remarkably, FN is no passive adhesive molecule but actively triggers signal transduction to the F-actin cytoskeleton and focal contact formation upon binding to integrins. Binding of FN to integrin α5β1 results in the recruitment of focal adhesion kinase (FAK) and Src kinase and the subsequent activation of these kinases in the focal adhesion complexes. Although ECM proteins are unique to the eukaryotic kingdom, many bacterial pathogens adhere to host ECM molecules or integrins to exploit the downstream signaling pathways for entering host cells or establishing persistent infection. For example, the well-characterized bacterial protein invasin (InvA) of Yersinia spp. has been shown to bind α5β1 and some other integrins in a manner similar to FN. Immobilized membrane proteins of an InvA-overproducing Yersinia strain were observed to trigger spreading of HEp-2 cells. Although this effect of InvA was attributed to its ability to bind integrin β1, the molecular mechanisms involved have not been investigated in full detail. Recently, we reported that the gastric pathogen and type I carcinogen Helicobacter pylori (Hp) exploits integrin receptors for the injection of virulence factors into mammalian cells. This is achieved by a type IV secretion system (T4SS) consisting of 11 VirB protein orthologs (encoded by the virB1-11 genes) and the so-called coupling protein (VirD4, an NTPase). These proteins are encoded by a 40-kb gene cluster known as the cag (cytotoxin-associated gene) pathogenicity island, the cagPAI. We have shown that integrin α5β1 binds to a small 26-kDa protein designated CagL, which is encoded by the open reading frame HP0539 in the cagPAI. CagL is predicted to be a functional VirB5 ortholog and structural component of the T4SS pilus, as seen with other VirB5 proteins such as that of Agrobacterium tumefaciens. CagL has no significant sequence homology to any known eukaryotic protein. But like FN, CagL carries an RGD motif shown to be important for interaction with the α5β1 integrin. However, it has recently been shown in yeast two-hybrid screens that other T4SS proteins such as CagY (VirB10), CagN, and the effector protein CagA can also bind β1 integrin in vitro, confirming that Hp targets this integrin member as a receptor for the T4SS. However, mutation of the RGD motif in CagL caused no defect in T4SS functions such as the phosphorylation of injected CagA. In contrast, another very recent study showed a clear role of CagL in activating ADAM17, a metalloprotease involved in catalyzing the ectodomain shedding of receptor tyrosine kinase ligands. In nonstimulated cells, ADAM17 is normally in complex with integrin α5β1 and inactive. During acute Hp infection, however, it was shown that CagL dissociates ADAM17 from the integrin α5β1 and activates ADAM17.
This was confirmed by infection with a ΔcagL deletion mutant, which is entirely defective in the latter response, and by genetic complementation with the wild-type (wt) cagL gene or biochemical complementation by the addition of extracellular CagL, both of which restore this function. These studies indicate that there is a controversy in the literature about the importance of CagL in T4SS functions and host cell signaling. Thus, the role of CagL needs to be investigated in more detail. Investigating the contribution of each of the various cagPAI proteins to signaling during infection is certainly very difficult to perform because mutation of single genes often leads to complete abolition of T4SS functions. For this purpose, the aim of the present study was to investigate whether purified CagL alone can trigger host cell signaling. We demonstrate that purified CagL mimics a number of cellular functions of FN in the induction of eukaryotic cell spreading and focal adhesion formation, involving the activation of the Src, FAK, and EGFR tyrosine kinases. Despite this functional mimicry between CagL and FN, CagL also activates the EGF receptor family tyrosine kinase Her3/ErbB3 under conditions where Her3/ErbB3 is not activated by FN. Our findings support the hypothesis that CagL may promote aberrant signaling cross-talk in host cells by mimicking FN in the induction of cell spreading and focal adhesion formation while activating FN-independent signaling pathways. CagL may be used as a novel tool for dissecting the regulatory networks that govern FN and integrin signaling.

EXPERIMENTAL PROCEDURES

Eukaryotic Cell Culture and Bacteria-The human gastric adenocarcinoma cell lines AGS and MKN45, HeLa cells, and several mouse fibroblast knock-out cell lines were cultured in RPMI 1640 or DMEM, respectively, supplemented with 10% fetal calf serum (Invitrogen). Mouse knock-out cells deficient in focal adhesion kinase (FAK−/− cells) or fibroblasts derived from c-src−/−, c-yes−/−, and c-fyn−/− triple knock-out mouse embryos (SYF cells), as well as FAK−/− cells stably expressing wt FAK and SYF cells stably expressing wt c-Src, were described previously. Generation of the floxed FN fibroblast cells and FN−/− knock-out cells has been described. The FN−/− cells were grown in DMEM supplemented with 10% fetal calf serum or, alternatively, in serum replacement medium (Sigma-Aldrich). After reaching a confluency of 70%, the cells were washed two times with phosphate-buffered saline and then starved for 12 h by incubation with fresh medium without fetal calf serum before the cells were trypsinized and prepared for the spreading assays as described below. Hp strains P1 and P1ΔcagL were grown as described.

CagL Peptides and Purification of CagL and VirB10 Proteins-Several CagL-derived RGD peptides (peptide 1, cyclo-Arg-Gly-Asp-D-Leu-Ala-; peptide 2, cyclo-Arg-Gly-Asp-Leu-D-Ala-; and peptide 3, cyclo-Arg-Gly-Asp-Leu-D-Ala-Leu-) were synthesized as described. To construct the vectors for overexpression of wild-type CagL (CagL wt) in Escherichia coli, a DNA fragment corresponding to amino acid residues 21-237 of the protein (minus the predicted signal peptide) was amplified by PCR, sequenced, and ligated into the pET-28a vector (Novagen). Mutagenesis of the RGD motif in the CagL sequence to CagL RGA or CagL RAD was performed using a QuikChange site-directed mutagenesis kit according to the instructions of the supplier (Stratagene). CagL wt, CagL RAD, and CagL RGA were overexpressed and purified by a standard protocol.
Briefly, E. coli BL21(DE3) cells transformed with the plasmids were grown in 5 ml of LB medium at 37°C. After overnight incubation, 500 ml of fresh LB medium was added and the culture was shaken for another 2.5-3 h to an A600 of 1. Then 1 mM isopropyl β-D-thiogalactopyranoside was added, and the bacteria were grown for 1.5 h to induce CagL expression. Bacterial pellets were collected by centrifugation and then resuspended in ice-cold buffer CW (50 mM KH2PO4-K2HPO4, pH 7.5, 200 mM NaCl) supplemented with protease inhibitor mixture (Roche Applied Science). After sonication, the overexpressed CagL present in the inclusion bodies was solubilized in buffer LW (50 mM KH2PO4-K2HPO4, pH 7.5, 200 mM NaCl, 6 M guanidine hydrochloride) and refolded in ice-cold refolding buffer (52 mM Tris-HCl, pH 8.2, 20 mM NaCl, 834 μM KCl, 1.1 mM EDTA, 2.1 mM reduced glutathione, 210 μM oxidized glutathione). After refolding, CagL was further purified by metal-chelate affinity chromatography on Talon resin (BD Biosciences) and gel filtration in buffer CW on Sephacryl S-200 (16/60) according to the manufacturer's instructions (Amersham Biosciences). Protein concentrations of the resultant samples were determined by the BCA protein assay (Pierce) and typically yielded a total amount of 1.5 mg of CagL in 10 ml of buffer. Under denaturing conditions, purified CagL proteins run at a size of 26 kDa. Purified CagL was judged to be of 95% homogeneity by SDS-PAGE/Coomassie Blue staining (supplemental Fig. S2). The folded conformations of the purified CagL wt, CagL RAD, and CagL RGA were confirmed by circular dichroism. No indication of post-translational modifications of purified CagL, such as disulfide formation or methylation, was detected. In addition to CagL, several VirB10 fragments were cloned for expression. The sequence encoding C-terminal VirB10 from Hp strain 13a (VirB10 C-term) was cloned into the BamHI/EcoRI sites of pGEX-2T using forward (5'-ACGGGATCCCTAGATAAACTCATAGGCCTTGG-3') and reverse (5'-ACGGAATTCTTAATTGCCACCTTTGGG-3') primers. Expression of GST and GST-VirB10 C-terminal fusion proteins was induced with isopropyl β-D-thiogalactopyranoside, and the proteins were subsequently purified from clarified lysates in the presence of protease inhibitor mixture (Roche Applied Science) using glutathione-Sepharose 4 Fast Flow (GE Healthcare) according to the manufacturer's instructions. Furthermore, His-tagged VirB10 repeat proteins, VirB10 Rpt 2.1 and 2.2 from strains Q86A and 13a, respectively, were expressed and purified as described previously. The exact coordinates of all VirB10 fragments are given in Fig. 7A.

Precoating of Petri Dishes and Cell Spreading Assays-Cell spreading assays and quantitation of spread cells were performed according to procedures described previously. Briefly, each well of the microtitre plates was coated with 100 μl of 50 μg/ml ligand (fibronectin (from human plasma; Sigma), purified CagL variants, RGD peptides, VirB10 proteins, or BSA (Sigma)) or polylysine (0.01% solution; Sigma) at 4°C overnight. In the case of heat-denatured CagL (CagL hdn), the protein was boiled for 10 min and then incubated under identical conditions. For double coatings, the plates were first incubated at 4°C overnight with polylysine and then with CagL wt. Nonspecific binding sites were blocked by incubation with 5% BSA in buffer CW for 2 h at room temperature. The cells were grown as described above, trypsinized, and then treated with soybean trypsin inhibitor according to the instructions of the supplier (Sigma).
After washing with phosphate-buffered saline, 4 × 10^5 cells in RPMI or DMEM were added to the wells and incubated in a time course. One hundred cells were randomly evaluated for cell spreading. The cells were scored as spread or not spread as described. The percentage of spreading of each cell line from a representative experiment is shown in Fig. 2. The pharmacological inhibitors AG1478 (BIOSOURCE; 10 μM), PP2 (Calbiochem; 10 μM), and PF-573228 (Tocris; 10 μM) were added in some experiments as indicated. The experiments were repeated at least three times with similar results.

Live Cell Imaging-For live cell imaging experiments, the cells were grown in monolayers for 2 days in conventional flasks using RPMI or DMEM with 10% fetal calf serum. After reaching a confluency of 70%, the cells were washed two times with phosphate-buffered saline and then starved for 12 h by incubation with fresh medium without fetal calf serum. The cells were trypsinized and then treated with soybean trypsin inhibitor according to the instructions of the supplier (Sigma). Three washing steps followed using fresh RPMI or DMEM, respectively. The cells were then deposited onto precoated 35-mm Petri dishes placed in a prewarmed (37°C), humidified, and equilibrated (5% (v/v) CO2) incubation chamber (PeCon GmbH) mounted on an inverted microscope (Leica DM IRE2, Leica Microsystems) equipped with a CCD camera (Spot RT, Diagnostic Instruments Corp.) and controlled by image acquisition software (MetaVue, Molecular Devices Corp.). A 40× phase contrast objective (Leica Microsystems) was also used to record cell spreading. The acquisition time interval was 2 min, and the transmitted light was switched on only during the exposure time of the CCD camera (1 s), controlled by the acquisition software. Alternatively, the microscopic analysis was performed using a confocal laser scanning microscope system TCS SP2 (Leica Microsystems) equipped with a DM-IRE2 inverted microscope and an incubation chamber (PeCon). Image data were obtained at time intervals of 2 min using a 63×/1.4 N.A. oil immersion objective and CLSM software (Leica Microsystems). Image data sets of both microscope systems were processed using ImageJ software.

Immunofluorescence Staining and Microscopy-Immunofluorescence staining was performed as described. In brief, cells fixed in 3.4% paraformaldehyde were stained with different antibodies as shown in each experiment. The samples were analyzed using a Leica TCS SP2 microscope system equipped with a DM-IRE2 microscope and different lasers (Leica Microsystems). To avoid spectral overlap and channel cross-talk, fluorescein isothiocyanate, TRITC, CY5, and Alexa-350 fluorophores were excited sequentially with an argon laser (488 nm), green helium-neon laser (543 nm), red helium-neon laser (633 nm), and UV laser (364 nm). The images were processed using ImageJ.

Field and Immuno Field Emission Scanning Electron Microscopy (FESEM)-Procedures for FESEM of Hp and spread cells were carried out as described previously. For immuno-FESEM of CagL, Hp samples were incubated with purified rabbit α-CagL IgG antibodies (100 μg IgG protein/ml) followed by incubation with 15-nm protein A-gold particles as described. All of the samples were coated with a thin carbon film. FESEM of spread cells was performed using procedures as described previously. The images were processed for contrast and brightness using Adobe Photoshop.

Statistical Analysis-All of the data were evaluated using Student's t test with SigmaStat statistical software (version 2.0). Statistical significance was defined by p < 0.05 (*) and p < 0.005 (**). All of the error bars shown in the figures, and the values quoted following the ± signs, represent standard deviations.
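To make the scoring and statistics concrete, the following is an illustrative sketch only, with made-up replicate values (the paper scores 100 randomly chosen cells per condition as spread or not spread, repeats each experiment at least three times, and compares conditions with Student's t test):

import numpy as np
from scipy import stats

# Hypothetical % spread cells (out of 100 scored) per replicate.
fn_spread = np.array([82.0, 78.0, 85.0])    # on fibronectin
cagl_spread = np.array([74.0, 70.0, 77.0])  # on CagL wt

t, p = stats.ttest_ind(fn_spread, cagl_spread)
print(f"FN: {fn_spread.mean():.1f} ± {fn_spread.std(ddof=1):.1f}; "
      f"CagL: {cagl_spread.mean():.1f} ± {cagl_spread.std(ddof=1):.1f}; p = {p:.3f}")
# Significance convention used in the paper: * p < 0.05, ** p < 0.005.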
RESULTS

CagL Is a Surface Protein of Hp That Can Induce Eukaryotic Cell Spreading and Focal Adhesion Formation in Vitro-CagL has been shown to be a component of the T4SS pilus and has no significant sequence homology to any known eukaryotic protein. However, similar to FN, CagL carries an RGD motif (Fig. 1, A and B) shown to be important for interaction with the α5β1 integrin in vivo and in vitro. CagL is expressed on the surface of Hp even in the absence of host cells (Fig. 1C and supplemental Fig. S1). After host cell contact, CagL decorates the T4SS pilus surface, suggesting that the protein may act as some kind of molecular sensor on the bacteria. Here, we further examined the ability of CagL to trigger intracellular signaling pathways. For this purpose, we overexpressed CagL in E. coli and purified the recombinant protein to homogeneity (supplemental Fig. S2). In our in vitro binding studies of AGS gastric epithelial cells, we noticed during phase contrast microscopy that eukaryotic cells not only bound to immobilized CagL on Petri dishes but were also triggered to spread (Fig. 2A). Cell spreading induced by CagL was surprising because such a feature is known only for some eukaryotic ECM proteins, including FN. CagL-induced cell spreading was phenotypically very similar to that seen with immobilized FN (Fig. 2A) and was also verified at high resolution by scanning electron microscopy (Fig. 2B). A time course shows that AGS cell spreading is not as rapid as the cell spreading induced by FN but became very efficient after 30-60 min of interaction (Fig. 3A and supplemental Movie S1). To exclude artifacts or cell type-specific effects of the observed phenomenon, CagL-induced cell spreading was confirmed with other cultured cell lines, including human HeLa, human MKN45, and mouse fibroblasts (Fig. 3, B and C). Moreover, wild-type CagL (CagL wt) also induced the formation of focal adhesions in host cells (Fig. 2, arrows), as confirmed by immunostaining using antibodies specific for the focal adhesion marker proteins vinculin and focal adhesion kinase (FAK) as well as by co-staining with phalloidin (for F-actin) (Fig. 4, arrows, and data not shown). As controls, these cells do not spread on immobilized BSA, GST, polylysine, or heat-denatured CagL (CagL hdn) within 4 h (Figs. 2, 3, A and B, and 7C), suggesting that the spreading was CagL-specific and requires proper folding of CagL. Three-dimensional modeling predicts that CagL forms a three-α-helix bundle with a protruding globular domain carrying the RGD motif in a surface-exposed loop. In contrast, the crystal structure of the FN type III domain encompassing the RGD motif is composed predominantly of β-sheets and is thus entirely different from the predicted structure of CagL (supplemental Fig. S3). CagL Induces Focal Adhesion Dynamics and Phosphotyrosine Signaling during Cell Spreading in an RGD-dependent Manner-The findings above suggest that CagL may mimic FN upon interaction with host cells. To confirm this hypothesis, we next investigated whether tyrosine phosphorylation of host cell proteins is induced during CagL-mediated cell spreading. Host cells were allowed to spread on immobilized CagL wt for 1 h, fixed, and then stained using a pan-α-phosphotyrosine antibody.
The results show that CagL wt induces the tyrosine phosphorylation of host cell proteins in the cytoplasm and in focal adhesions, which were co-stained with an α-vinculin antibody (Fig. 5A, arrows). This suggests that upon binding to host cells, CagL induces tyrosine kinase signaling, which prompted us to investigate the pattern of phosphorylated proteins by Western blotting. The results show that immobilized CagL wt induces the tyrosine phosphorylation of four major protein species, at 60, 120, 170, and 200 kDa (Fig. 5B, arrows). The identity of these proteins was confirmed by Western blotting using phospho-specific antibodies against well known host signaling proteins in these size ranges. The proteins were identified as the two nonreceptor tyrosine kinases Src (60 kDa) and FAK (120 kDa) and the two growth factor receptor tyrosine kinases EGFR (epidermal growth factor receptor, 170 kDa) and Her3/ErbB3 (200 kDa), respectively (Fig. 5C, arrows). Interestingly, cell spreading on FN induced similar phosphorylated proteins, as expected, but significant differences were also observed between the CagL wt- and FN-induced phospho-patterns (Fig. 5C, asterisks). Although the extents of phosphorylation of Src and FAK were similar, the activation of EGFR by FN was significantly reduced, and almost no activation of Her3/ErbB3 by FN was seen. The quantification data for tyrosine kinase activities are shown in supplemental Fig. S4. Next, we asked whether the RGD motif is important for CagL-induced signaling. Mutagenesis of the RGD motif to either RAD (CagL RAD) or RGA (CagL RGA) led to a profound inhibition of both CagL-induced cell spreading and tyrosine kinase activation (Figs. 5C and 6), suggesting that the RGD motif of CagL is crucial for triggering the host cell responses that are also stimulated by human FN. We have recently shown that co-incubation of AGS cells with CagL-derived RGD peptides leads to the induction of FAK and Src phosphorylation. Thus, we immobilized various CagL-specific RGD peptides on Petri dishes followed by cell spreading analysis. The peptides alone did not induce cell spreading. The cells incubated on immobilized RGD peptides remained in a round shape similar to that of cells incubated on heat-denatured CagL (supplemental Fig. S5). These observations strongly suggest that the RGD motif alone is not sufficient for triggering cell spreading. Taken together, our findings suggest that although the RGD motif of CagL plays a key role, other structural determinants in CagL are also required for the induction of cell spreading. Eukaryotic Cell Spreading Induced by Immobilized CagL Requires FAK, Src, and EGF Receptor Tyrosine Kinases-To characterize the cellular effects of CagL in more detail, we next tested whether CagL wt can induce spreading of mouse fibroblast cells deficient in focal adhesion kinase (FAK-/- cells) or of fibroblasts derived from c-src-/-, c-yes-/-, and c-fyn-/- triple knock-out mouse embryos (SYF cells). Interestingly, neither FAK-/- cells (Fig. 6, A-C) nor SYF cells (Fig. 6, D-F) were able to spread efficiently on CagL wt. Stable expression of wt FAK in FAK-/- cells or of wt c-Src in SYF cells restored cell spreading, suggesting that both FAK and Src kinase signaling play an important role in CagL-induced cell spreading (Fig. 6).
In addition, pharmacological inhibition of Src family kinases by PP2, of FAK by PF-573228, and of the receptor tyrosine kinases EGFR and Her3/ErbB3 by AG1478 also suppressed cell spreading on CagL, confirming that each of these activated components is indeed necessary for CagL signaling (supplemental Fig. S6). The Internal Repeat Region of VirB10 but Not the C Terminus Can Enhance CagL-induced Cell Spreading-Next, we asked whether the observed effect of CagL is specific for this protein or whether similar observations can be made with the Hp CagY (VirB10) protein, which can also bind integrin β1. For this purpose, the internal repeat region 2 and the C-terminal fragment were cloned, purified, and immobilized for cell spreading assays (Fig. 7A). The results show that neither of the two fragments was able to induce efficient cell spreading (Fig. 7, B and C). However, whereas the C-terminal VirB10 behaved like the GST or BSA controls with no signs of spreading, repeat region 2 from two strains showed some weak activity. When CagL wt and either repeat region were mixed together, the cell spreading effect of CagL was enhanced (Fig. 7, B and C). CagL Can Functionally Complement FN to Induce Cell Spreading of Fibronectin-/- Fibroblasts-Finally, we investigated whether CagL can functionally complement FN and induce cell spreading of knock-out fibroblasts deficient in the gene encoding FN (FN-/- cells). Interestingly, CagL wt but not CagL RAD or CagL RGA induced spreading of FN-/- cells (Fig. 8, A-C, and supplemental Movie S2). To confirm that CagL wt can restore the function of FN, we performed cell spreading assays of trypsinized FN-/- and FN+/+ cells for 2-10 h on either polylysine, a substrate that keeps integrins in a nonactivated form, or polylysine mixed with CagL wt. Although neither FN-/- nor FN+/+ cells were able to spread on any of these substrates between 2 and 4 h, FN+/+ cells spread efficiently at later time points (4-10 h), which can be explained by secretion of FN at these time points. The spreading of FN-/- cells on polylysine was significantly enhanced by the presence of CagL wt (Fig. 8D). By direct comparison, FN-/- cells were unable to spread efficiently on polylysine even at 10 h, whereas the addition of CagL wt induced cell spreading to an extent similar to that observed with the spreading of FN+/+ cells on polylysine alone (Fig. 8E). Collectively, these data strongly support the view that CagL mimics the function of FN and can even restore its function in FN-/- cells.

DISCUSSION

Human FN and its RGD tripeptide motif play an important role in mediating eukaryotic cell-to-cell interactions and triggering signaling, which is important for cell adhesion, spreading, migration, and other processes. The importance of the RGD motif is underlined by the finding that mouse embryos in which the RGD motif is mutated to an inactive RGE motif die at day 10 because of severe defects that resemble the phenotype of integrin α5β1-deficient mice. Other studies indicated that FN is a target of microbial pathogens because a number of bacterial proteins have been found to bind FN to mediate bacterial adhesion to or invasion into host cells. In the present report, we show that the Hp CagL protein mimics FN in the induction of cell spreading, focal adhesion formation, and activation of Src, FAK, EGFR, and Her3/ErbB3 in vitro. CagL therefore represents a novel example of molecular mimicry between a eukaryotic ECM protein and a virulence factor of bacterial origin.
We observed that immobilized CagL wt induced host cell spreading and focal adhesion formation that was phenotypically similar to that seen with immobilized FN. The induction of cell spreading by CagL was not as rapid as that induced by FN but became very efficient after 30-60 min of co-incubation. To exclude artifacts, we confirmed that host cells do not spread on immobilized BSA, GST, or polylysine. We also investigated whether CagL can induce cell spreading and focal adhesion formation of different human cell lines (AGS, MKN45, and HeLa) and mouse fibroblast cell lines, thus excluding cell type-specific effects of the observed phenomenon.

FIGURE 5. A, the stainings show that cells spreading on immobilized CagL wt induce phosphotyrosine signaling in the cytoplasm and in focal adhesion complexes (arrows). B, the global pattern of phosphorylated proteins induced by CagL wt was investigated by Western blotting using a pan-α-phosphotyrosine antibody. The results show that immobilized CagL wt induces the tyrosine phosphorylation of four major protein species, at 60, 120, 170, and 200 kDa (arrows). C, using commercially available phospho-specific antibodies, the proteins were identified as the tyrosine kinases Src (60 kDa), FAK (120 kDa), EGFR (170 kDa), and Her3/ErbB3 (200 kDa), respectively (arrows). The α-glyceraldehyde 3-phosphate dehydrogenase (GAPDH) blot confirms that equal amounts of protein were loaded in each lane. Parallel cell spreading experiments for 1 h were performed on immobilized fibronectin, CagL RGA, CagL RAD, and BSA as indicated. Mutagenesis of the RGD motif resulted in a profound inhibition of CagL-induced tyrosine kinase activation, suggesting that CagL uses its RGD motif to trigger host cell responses similar to those triggered by human fibronectin. Quantitation of kinase activities was done densitometrically, and the data are shown in supplemental Fig. S4.

Moreover, as another control, we could demonstrate that denatured CagL hdn was unable to induce cell spreading and focal adhesion formation, suggesting that proper folding of CagL is essential for the induction of these phenotypes. Finally, we showed that neither immobilized CagL RGA nor CagL RAD mutants were able to induce these responses. Taken together, these findings provide comprehensive evidence that Hp CagL not only interacts with the integrin member α5β1 but is also capable of inducing signaling leading to cell spreading and focal adhesion formation. In healthy tissues, maximal binding of FN to α5β1 integrin requires two internal domains, called FnIII-9 and FnIII-10 (Ref. 48 and Fig. 1A). Genetic studies indicated that residues in both domains are involved in contacting the integrin receptor, with aspartate residue 1495 in the RGD motif of FnIII-10 being the most significant contributor to binding energy. Several other residues located on the same face of the molecule as the RGD sequence of FN also contribute to integrin binding, including those within the so-called synergy region in FnIII-9 (48-50), as well as residues located between the synergy and RGD motif sites. Interestingly, CagL wt can bind to integrin α5β1 (dissociation constant Kd = 0.09 µM) with an affinity higher than that of the CagL RGA mutant (Kd = 0.36 µM) or FN (Kd = 0.8 µM). However, except for the RGD motif, there is no sequence similarity of CagL to FN, and the three-dimensional structure model of CagL is formed mainly by a three-α-helix bundle with a protruding globular domain carrying the RGD motif.
Interestingly, the crystal structure of the FN host cell-binding domain is composed predominantly of β-sheets and therefore appears to be quite different from the predicted structure of CagL. The fact that cyclic CagL-derived RGD peptides alone fail to induce cell spreading confirms that additional structural determinants in CagL are required for triggering cell spreading; identification of these structural determinants by site-directed mutagenesis is currently underway in our laboratories. Determination and subsequent comparison of the three-dimensional structure of CagL with that of FN might provide novel insights into the structural basis underlying the induction of cell spreading and focal adhesion formation by external stimuli. CagL also exhibits neither sequence nor structural homology to the well known bacterial protein invasin (InvA, Yersinia spp.), which binds α5β1 and some other integrins in a manner similar to FN but does not contain an RGD motif. It is intriguing that invasin and FN recognize similar residues on the integrin receptor, given that the solved crystal structures of their respective integrin-binding regions have very different surface contours. Three important factors have been proposed to enhance invasin-mediated uptake: (i) high affinity binding of integrin receptors by the so-called D4-D5 superdomain, (ii) the ability of invasin monomers to undergo homotypic interactions, and (iii) an increase in the concentration of integrin receptors available to bind invasin. Mutations that lower the affinity of InvA for integrin receptors, deletion of a region of invasin necessary for homotypic interaction, and depletion of integrins from the host cell all severely depress bacterial uptake, causing extracellular adhesion of the bacteria. Most bacteria-host interaction studies have used InvA-coupled latex beads or invA mutant bacteria. Nevertheless, the ability of Petri dish-immobilized purified InvA to induce cell spreading or focal adhesion formation of host cells has been only marginally investigated. For example, HEp-2 cells incubated on immobilized membrane proteins of an InvA-overproducing Yersinia strain showed signs of spreading. In a similar study, incubation of immobilized purified InvA with T-lymphocytes led to the rapid apoptotic death of these cells. Despite these investigations, the specificity and mechanism by which the Yersinia protein InvA mediates cell spreading in an integrin-dependent manner remain enigmatic. The most striking difference between Yersinia invasin and FN binding is the significantly higher affinity of invasin-receptor binding. This activity is critical both for the protein to promote uptake and as a central virulence determinant for the microorganism. Low affinity integrin ligands, coated on either particles or bacteria, allow efficient adhesion to mammalian cells but have a greatly reduced capacity to promote uptake relative to that seen with invasin. Given the observation that bacteria expressing active InvA blocked binding of CagL to integrin α5β1, the affinity of InvA for integrin α5β1 (Kd = 0.005 µM) is clearly higher than that of CagL or FN. This might explain why CagL does not act as an invasin. Hp is essentially an extracellular pathogen, with only 2-5% of bacteria occasionally observed intracellularly during infections in vitro and in vivo. Thus, the majority of Hp bacteria remain extracellular and trigger intracellular host cell signaling from the outside. The latter dogma is in full agreement with these observations.
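To make the affinity comparison concrete, a back-of-the-envelope calculation helps: for a simple 1:1 ligand-receptor equilibrium (an assumption of this note, not a model used in the study), the fraction of receptor occupied at free ligand concentration [L] follows the standard binding isotherm, so a lower Kd means higher occupancy at the same ligand concentration:

\[
\theta = \frac{[\mathrm{L}]}{K_d + [\mathrm{L}]}
\]

Taking an illustrative [L] = 0.1 µM together with the dissociation constants quoted above:

\[
\theta_{\mathrm{InvA}} \approx \frac{0.1}{0.005 + 0.1} \approx 0.95, \qquad
\theta_{\mathrm{CagL}} \approx \frac{0.1}{0.09 + 0.1} \approx 0.53, \qquad
\theta_{\mathrm{FN}} \approx \frac{0.1}{0.8 + 0.1} \approx 0.11,
\]

which illustrates why invasin's much tighter binding can drive uptake while CagL's intermediate affinity is compatible with adhesion and signaling without invasin-like internalization.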
Our findings support the hypothesis that CagL functions as a specialized adhesin that not only anchors the T4SS to the host surface through binding to integrin but also promotes intracellular signal transduction, as shown in the present study.

FIGURE 7. The internal repeat region of VirB10 but not its C terminus can enhance CagL-induced cell spreading. A, schematic representation of the entire CagY (also called VirB10) protein and its domains as reported recently. Approximate amino acid positions of each region are given. The indicated C-terminal (C-term) VirB10 domain of the CagY protein shares 31% identity and 55% similarity with the A. tumefaciens VirB10 protein. The putative secreted T4SS pilus-associated form of VirB10 comprises the large repeat 2 region encoded between the two transmembrane domains. The following fragments were purified: VirB10 repeat region 2.1 (Rpt 2.1, Hp strain Q86A, 578 amino acids long), VirB10 repeat region 2.2 (Rpt 2.2, Hp strain 13a, 799 amino acids long), and the VirB10 C-terminal region (C-term, Hp strain 13a, 109 amino acids long). To investigate whether these VirB10 proteins can also induce cell spreading, respective assays were performed with fibroblast cells. B, quantitation of spread cells at the 1- and 2-h time points is shown. The results demonstrate that CagL wt can induce profound cell spreading of fibroblasts, whereas the VirB10 fragments, GST, and BSA cannot. When CagL was mixed with the VirB10 fragments, repeat region 2 but not the C terminus enhanced the CagL effect significantly. C, representative phase contrast micrographs of fibroblast cells incubated with the indicated Hp proteins. Bar, 10 µm. Quantitation data of three independent experiments are shown.

Finally, we demonstrated that binding of host cells to immobilized CagL triggers tyrosine phosphorylation of a number of signaling proteins that play known key roles in cell adhesion and proliferation. Western blotting using pan-phosphotyrosine antibodies indicated that immobilized CagL wt induces the tyrosine phosphorylation of four major protein species, which were identified as the two nonreceptor tyrosine kinases Src (60 kDa) and FAK (120 kDa) and the two growth factor receptor tyrosine kinases EGFR (170 kDa) and Her3/ErbB3 (200 kDa), respectively. Our finding that purified CagL wt alone can activate Src and FAK upon cell contact in vitro extends our previous observations that wt Hp but not the ΔcagL mutant induced Src and FAK activation during infection of AGS cells. In addition, our present study identified a long-awaited bacterial factor that can activate EGFR. The fact that Hp profoundly activates EGFR has been known for some time (59-62). However, it was unclear whether EGFR can be activated by a structural T4SS component, a translocated cagPAI effector molecule, or another factor of Hp. Furthermore, the data presented here indicate that CagL alone can activate not only EGFR but also Her3/ErbB3, another member of this proto-oncogenic growth factor receptor family. The biological consequence of the activation of Her3/ErbB3 by CagL during infection is currently under investigation. In addition, we demonstrated that activation of Src, FAK, EGFR, and Her3/ErbB3 by CagL proceeds in an RGD-dependent manner that is required for cell spreading and focal adhesion formation. A putative signaling model involving CagL-triggered integrin activation, tyrosine kinase activation, and cell spreading is depicted in Fig. 9. Because CagL exhibits multiple protein signals on the bacterial cell surface (Fig.
1C) and on T4SS pili during infection in vivo, we propose that CagL not only binds to integrin α5β1 but may also induce integrin clustering. This conclusion is also supported by our previous observations showing that CagL-coated latex beads induced integrin α5β1 clustering when co-incubated with AGS cells. Clustering of integrins by extracellular substrates generates a variety of intracellular signals, including tyrosine phosphorylation of cytoskeleton-associated and other factors (9-12). We propose that binding of CagL to integrin α5β1 has similar effects. Integrins also cooperate with growth factor receptors and induce their transactivation. For example, it has been shown that EGFR transactivation can be mediated by FN. EGFR transactivation requires metalloproteinase cleavage of proHB-EGF, and Hp-stimulated EGFR transactivation has the same requirement. Interestingly, integrin α5β1 and ADAM-17 can physically interact in vitro and co-localize in HeLa and AGS cells. Given that Hp promotes cell proliferation through EGFR transactivation by ADAM activation, our present data support the hypothesis that the CagL-integrin interaction activates the metalloprotease ADAM-17 and subsequently EGFR. Taken together, CagL is capable of mimicking a number of FN functions in vitro and can even trigger efficient spreading of FN-/- knock-out cells. The notion that convergent evolution has resulted in molecular mimicry between CagL, a bacterial surface virulence protein, and FN, a eukaryotic extracellular matrix protein, is intriguing. Whether the persistent colonization of the human stomach epithelium by Hp, and hence their co-evolution, is one of the driving forces remains to be tested. Nonetheless, the FN-like properties of CagL and the stimulation of cancer-associated signaling by CagL are likely to play crucial roles in persistent Hp infection and may provide new insights into the molecular basis of Hp-induced carcinogenesis and metastasis. Based on the observation that CagL but not FN can activate Her3/ErbB3, it is tempting to propose that CagL may promote abnormal signaling cross-talk by aberrantly linking FN-dependent and FN-independent signaling pathways in the target cell. Interestingly, a yeast two-hybrid screen and GST pulldown assays revealed that the T4SS-associated CagY (VirB10) and CagA proteins also bind to β1 integrin, which was proposed to facilitate the injection of CagA in an RGD-independent manner. However, it remained unclear whether and how binding of VirB10 and CagA to the extracellular β1 integrin receptor could trigger intracellular signaling. Irrespective of the specific contribution of individual T4SS proteins (CagL, VirB10, and CagA) toward CagA injection as investigated previously, the present data clearly demonstrate the functional importance of CagL alone in triggering transmembrane signaling to activate EGFR, Her3/ErbB3, Src, FAK, and probably other factors. Interestingly, when the purified repeat region 2 or the C terminus of VirB10 was immobilized, neither of these fragments could induce efficient cell spreading. Remarkably, however, when we mixed CagL with VirB10, the repeat region 2 but not the integrin β1-interacting C terminus enhanced the CagL effect (Fig. 7). This finding suggests that the internal repeat region of VirB10 and CagL may act cooperatively and that the C-terminal interaction of VirB10 with integrin β1 has a different function, further confirming that the observed cell spreading effect is specific for CagL.
Whether other Hp factors such as extracellularly added VirB10 or CagA can also trigger similar and/or other intracellular signaling pathways, and whether CagL-mediated activation of EGFR, Her3/ErbB3, Src, and FAK contributes to the injection of CagA during infection, needs to be investigated in future studies. Nevertheless, it seems clear that bacterial factors such as CagL, VirB10, and CagA, which interfere with host surface factors, could be used as novel tools to study integrin signaling and could be promising candidates as novel drug targets for specific intervention in the particularly severe gastric diseases caused by cagPAI-positive Hp strains.

FIGURE 9. Hypothetical model for CagL-dependent integrin targeting and activation of intracellular tyrosine kinase signaling. CagL mimics FN, using its RGD motif to bind to α5β1 integrin. The CagL RGA and CagL RAD mutants show low affinity binding, with cells remaining round and showing no signs of cell spreading or focal adhesion formation. We propose that CagL is able to trigger high affinity interactions, integrin clustering, and profound activation. Activated integrins can then stimulate FAK activity by phosphorylation of tyrosine 397, which is a major binding site for Src. CagL-stimulated integrins may also activate the EGF and Her3/Neu receptors, possibly by stimulating a metalloprotease of the ADAM family. The N-terminal domain of FAK also mediates the association with activated EGF receptor signaling complexes, and this association and FAK phosphorylation at tyrosine 397 are important for growth factor-stimulated actin rearrangements, cell spreading, and cell motility.
/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkSVGText_DEFINED
#define SkSVGText_DEFINED

#include <vector>

#include "modules/svg/include/SkSVGTransformableNode.h"
#include "modules/svg/include/SkSVGTypes.h"

class SkSVGTextContext;

// Base class for text-rendering nodes.
class SkSVGTextFragment : public SkSVGTransformableNode {
public:
    void renderText(const SkSVGRenderContext&, SkSVGTextContext*, SkSVGXmlSpace) const;

protected:
    explicit SkSVGTextFragment(SkSVGTag t) : INHERITED(t) {}

    virtual void onRenderText(const SkSVGRenderContext&, SkSVGTextContext*,
                              SkSVGXmlSpace) const = 0;

private:
    SkPath onAsPath(const SkSVGRenderContext&) const final;

    using INHERITED = SkSVGTransformableNode;
};

// Base class for nestable text containers (<text>, <tspan>, etc).
class SkSVGTextContainer : public SkSVGTextFragment {
public:
    SVG_ATTR(X, std::vector<SkSVGLength>, {})
    SVG_ATTR(Y, std::vector<SkSVGLength>, {})
    SVG_ATTR(Dx, std::vector<SkSVGLength>, {})
    SVG_ATTR(Dy, std::vector<SkSVGLength>, {})
    SVG_ATTR(Rotate, std::vector<SkSVGNumberType>, {})
    SVG_ATTR(XmlSpace, SkSVGXmlSpace, SkSVGXmlSpace::kDefault)

    void appendChild(sk_sp<SkSVGNode>) final;

protected:
    explicit SkSVGTextContainer(SkSVGTag t) : INHERITED(t) {}

    void onRenderText(const SkSVGRenderContext&, SkSVGTextContext*,
                      SkSVGXmlSpace) const override;

    bool parseAndSetAttribute(const char*, const char*) override;

private:
    void onRender(const SkSVGRenderContext&) const final;

    std::vector<sk_sp<SkSVGTextFragment>> fChildren;

    using INHERITED = SkSVGTextFragment;
};

class SkSVGText final : public SkSVGTextContainer {
public:
    static sk_sp<SkSVGText> Make() { return sk_sp<SkSVGText>(new SkSVGText()); }

private:
    SkSVGText() : INHERITED(SkSVGTag::kText) {}

    void onRenderText(const SkSVGRenderContext&, SkSVGTextContext*,
                      SkSVGXmlSpace) const override;

    using INHERITED = SkSVGTextContainer;
};

class SkSVGTSpan final : public SkSVGTextContainer {
public:
    static sk_sp<SkSVGTSpan> Make() { return sk_sp<SkSVGTSpan>(new SkSVGTSpan()); }

private:
    SkSVGTSpan() : INHERITED(SkSVGTag::kTSpan) {}

    using INHERITED = SkSVGTextContainer;
};

class SkSVGTextLiteral final : public SkSVGTextFragment {
public:
    static sk_sp<SkSVGTextLiteral> Make() {
        return sk_sp<SkSVGTextLiteral>(new SkSVGTextLiteral());
    }

    SVG_ATTR(Text, SkSVGStringType, SkSVGStringType())

private:
    SkSVGTextLiteral() : INHERITED(SkSVGTag::kTextLiteral) {}

    void onRender(const SkSVGRenderContext&) const override {}

    void onRenderText(const SkSVGRenderContext&, SkSVGTextContext*,
                      SkSVGXmlSpace) const override;

    void appendChild(sk_sp<SkSVGNode>) override {}

    using INHERITED = SkSVGTextFragment;
};

class SkSVGTextPath final : public SkSVGTextContainer {
public:
    static sk_sp<SkSVGTextPath> Make() {
        return sk_sp<SkSVGTextPath>(new SkSVGTextPath());
    }

    SVG_ATTR(Href       , SkSVGIRI   , {SkString()} )
    SVG_ATTR(StartOffset, SkSVGLength, SkSVGLength(0))

private:
    SkSVGTextPath() : INHERITED(SkSVGTag::kTextPath) {}

    void onRenderText(const SkSVGRenderContext&, SkSVGTextContext*,
                      SkSVGXmlSpace) const override;

    bool parseAndSetAttribute(const char*, const char*) override;

    using INHERITED = SkSVGTextContainer;
};

#endif // SkSVGText_DEFINED
Cable Positive received the TV Cares “Ribbon of Hope Award” from the Academy of Television Arts & Sciences. The cable industry’s AIDS-action organization was recognized for its “Join the Fight” public-service-announcement campaign. Sesame Workshop, The WB Television Network’s Everwood and ABC’s Extreme Makeover: Home Edition were also honored at the ceremony, which was held at the Leonard Goldenson Theatre in Hollywood.
Randomised trial of high doses of stilboestrol and ethisterone in pregnancy: long-term follow-up of mothers. In 1950 a trial was set up to evaluate the effects of large doses of stilboestrol and ethisterone on rates of fetal loss in pregnant diabetic women. Eighty women were allocated at random to receive the hormonal treatment and 76 to receive inactive tablets of identical appearance. At follow-up 27 years later, information was obtained about 97% of the women, all but four being traced. All respondents were unaware of who had received hormones. The overall mortality was 4.5 times that of women of comparable age in England and Wales, most deaths being from complications of diabetes. More tumours, mainly benign, of the reproductive tract were reported in the hormone-exposed than the non-exposed group (14 (18%) and two (3%) respectively). Four cases of malignant breast disease were reported in the hormone-exposed women and none in the non-exposed. These findings support other evidence linking oestrogen treatment and breast cancer and suggesting that the latent period before the tumour becomes clinically apparent may be 15 years or longer.
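As a quick consistency check on the figures reported above, the tumour percentages follow directly from the group sizes; this short Python sketch just redoes that arithmetic with the counts taken from the abstract. The "crude relative risk" line is my own derived quantity (it ignores loss to follow-up), not a figure reported in the study.

# Re-deriving the percentages quoted in the abstract from its raw counts.
exposed_n, exposed_tumours = 80, 14      # hormone-exposed group
control_n, control_tumours = 76, 2       # non-exposed group

pct_exposed = 100 * exposed_tumours / exposed_n   # -> 17.5, reported as 18%
pct_control = 100 * control_tumours / control_n   # -> 2.6, reported as 3%

# Ratio of the two rates: a crude relative risk, ignoring follow-up loss.
relative_risk = (exposed_tumours / exposed_n) / (control_tumours / control_n)

print(f"exposed: {pct_exposed:.1f}%  control: {pct_control:.1f}%")
print(f"crude relative risk ~ {relative_risk:.1f}")  # ~6.7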
If you're among the many listeners who became obsessed with Serial, we can relate. The spoof-worthy true-crime podcast quickly became a viral hit, but there is a whole world of podcasts out there to listen to! If Serial was your first time dipping your toe into the podcast pool, we have lots of suggestions for programs that are similar but in very different ways. Take a look ahead to find your next favorite podcast, and then get all the details for Serial season 2!
Combined EXAFS and DFT structure calculations provide structural insights into the 1:1 multi-histidine complexes of Cu(II), Cu(I), and Zn(II) with the tandem octarepeats of the mammalian prion protein. The metal-coordinating properties of the prion protein (PrP) have been the subject of intense focus and debate since the first reports of its interaction with copper just before the turn of the century. The picture of metal coordination to PrP has been improved and refined over the past decade, but structural details of the various metal coordination modes have not been fully elucidated in some cases. In the present study, we have employed X-ray absorption near-edge spectroscopy as well as extended X-ray absorption fine structure (EXAFS) spectroscopy to structurally characterize the dominant 1:1 coordination modes of Cu(II), Cu(I), and Zn(II) with an N-terminal fragment of PrP. The PrP fragment corresponds to four tandem repeats representative of the mammalian octarepeat domain, designated OR4, which is also the most studied PrP fragment for metal interactions, making our findings applicable to a large body of previous work. Density functional theory (DFT) calculations have provided additional structural and thermodynamic data, and candidate structures have been used to inform EXAFS data analysis. The optimized geometries from DFT calculations have been used to identify potential coordination complexes for multi-histidine coordination of Cu(II), Cu(I), and Zn(II) in an aqueous medium, modelled using 4-methylimidazole to represent the histidine side chain. Through a combination of in silico coordination chemistry and rigorous EXAFS curve-fitting, using full multiple scattering on candidate structures derived from DFT calculations, we have characterized at atomic resolution the predominant coordination modes of the 1:1 complexes of Cu(II), Cu(I), and Zn(II) with the OR4 peptide at pH 7.4, which are best represented as square-planar (2+), digonal (+), and tetrahedral (2+), respectively.
1. Field of the Invention The present invention relates to editing systems for voice recognition and, more particularly, to a system and method for editing messages transcribed from speech from a telephone. 2. Description of the Related Art Advances in personal communications in recent years have led to information being transmitted through a variety of channels to users, for instance speech, multimedia (figures and speech), text (e-mail, pagers), etc. Due to these advances, there has arisen the concept of unified messaging, whereby the messages received by a user through various media are stored in a single repository and can be retrieved or searched by the user at his/her convenience. Further, it may be the case that the user has only a personal digital assistant (PDA) with very limited capabilities through which to retrieve his messages. In general, however, even the simplest of PDA's will support the reception of text, though it may not support the reception of multimedia signals. Consequently, it may be necessary to convert speech and multimedia signals into text so that the signals can be easily accessed. This also has implications for the bandwidth requirements for communication--text signals require less bandwidth than speech for transmission. Voicemail is a commonly used messaging system wherein the speech of a person is recorded and subsequently played back by the recipient of the message. Hence, an important component of unified messaging is the capability to convert such messages into text. This can of course be done by using automatic speech recognition algorithms. However, voicemail messages typically represent spontaneous speech recorded over an unknown (the caller who is leaving the message may be halfway around the earth or next door) telephone bandwidth channel, and hence represent a very challenging task for automatic speech recognition systems. There is the danger of the transcribed text being so full of errors that the recipient of the message may not be able to decipher the message at all. Hence, it is advantageous to incorporate some form of feedback mechanism whereby the person leaving the message can check the quality of the transcription and correct it if necessary. Therefore, a need exists for an interactive system and method for converting speech data into text and incorporating the feature of correction of the transcribed text by voice.
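The feedback mechanism described above (letting the caller hear the transcription and re-dictate if it is wrong) can be sketched as a simple control loop. Everything in this Python sketch is hypothetical scaffolding: recognize, play_back, record_speech, and get_keypress stand in for whatever recognizer, text-to-speech, recording, and DTMF interfaces a real voicemail platform would provide.

# Hypothetical sketch of the caller-feedback loop described above.
# record_speech(), recognize(), play_back(), and get_keypress() are
# placeholders for a real platform's recording, ASR, TTS, and DTMF APIs.

MAX_ATTEMPTS = 3

def transcribe_with_confirmation(record_speech, recognize, play_back, get_keypress):
    """Record a message, transcribe it, and let the caller accept or retry."""
    for attempt in range(MAX_ATTEMPTS):
        audio = record_speech()
        text = recognize(audio)
        # Read the transcription back to the caller for verification.
        play_back(f"Your message was transcribed as: {text}. "
                  "Press 1 to accept, 2 to re-record.")
        if get_keypress() == "1":
            return text  # caller confirmed the transcription
    # Fall back to storing raw audio if transcription keeps failing.
    return None

if __name__ == "__main__":
    # Toy stand-ins so the sketch runs end-to-end without a real platform.
    msg = transcribe_with_confirmation(
        record_speech=lambda: b"...",
        recognize=lambda audio: "call me back about the meeting",
        play_back=print,
        get_keypress=lambda: "1",
    )
    print("stored transcription:", msg)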
import sys

import click
from flask import Flask
from flask.cli import with_appcontext

from webserver.definitions import ROOT_DIR
from webserver.models import db
from webserver.views import api_bp, home

# fix import
sys.path.insert(0, ROOT_DIR)

# initialize flask app
app = Flask(__name__)

# handle errors
@app.errorhandler(404)
def page_not_found(error):
    """Handles 404 errors by simply just returning the code and no page rendering"""
    return "", 404

# database setup command
@click.command(name="create_tables")
@with_appcontext
def create_tables():
    db.create_all()

# start the app if in main module
if __name__ == '__main__':
    # register blueprints
    app.register_blueprint(home)
    app.register_blueprint(api_bp, url_prefix='/api')
    # set config
    app.config.from_pyfile('config.py', silent=True)
    # register setup command
    app.cli.add_command(create_tables)
    # initialize postgres
    db.init_app(app)
    # run application
    app.run()
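One consequence of the layout above is that the blueprints, config, and database are wired up only under the `if __name__ == '__main__':` guard, so the app is incomplete when imported (for example by a WSGI server or a test). A common refactor is an application factory; the sketch below is one hedged way to do it, reusing only names already present in the module (the webserver.models and webserver.views modules are assumed to exist exactly as imported above):

# Hypothetical application-factory variant of the module above, so the same
# wiring also happens when the app is imported rather than run directly.
from flask import Flask

from webserver.models import db
from webserver.views import api_bp, home

def create_app() -> Flask:
    # Mirror the wiring from the __main__ block above.
    app = Flask(__name__)
    app.config.from_pyfile('config.py', silent=True)
    app.register_blueprint(home)
    app.register_blueprint(api_bp, url_prefix='/api')
    db.init_app(app)
    return app

if __name__ == '__main__':
    app = create_app()
    with app.app_context():
        db.create_all()  # same effect as the create_tables CLI command
    app.run()

With a factory, a WSGI server can call create_app() directly, and table creation can run inside app.app_context() instead of depending on the CLI command being registered at run time.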