patch (string, 17–31.2k chars) | y (int64, values 1–1) | oldf (string, 0–2.21M chars) | idx (int64, values 1–1) | id (int64, 4.29k–68.4k) | msg (string, 8–843 chars) | proj (string, 212 classes) | lang (string, 9 classes)
---|---|---|---|---|---|---|---|
@@ -342,6 +342,10 @@ func (s *Service) createStressChaos(exp *core.ExperimentInfo) error {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
+ if exp.Target.StressChaos.ContainerName != nil {
+ chaos.Spec.ContainerName = exp.Target.StressChaos.ContainerName
+ }
+
return s.kubeCli.Create(context.Background(), chaos)
}
| 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package experiment
import (
"context"
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
"golang.org/x/sync/errgroup"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/common"
"github.com/chaos-mesh/chaos-mesh/pkg/apiserver/utils"
"github.com/chaos-mesh/chaos-mesh/pkg/config"
"github.com/chaos-mesh/chaos-mesh/pkg/core"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var log = ctrl.Log.WithName("experiment api")
// ChaosState defines the number of chaos experiments of each phase
type ChaosState struct {
Total int `json:"Total"`
Running int `json:"Running"`
Waiting int `json:"Waiting"`
Paused int `json:"Paused"`
Failed int `json:"Failed"`
Finished int `json:"Finished"`
}
// Service defines a handler service for experiments.
type Service struct {
conf *config.ChaosDashboardConfig
kubeCli client.Client
archive core.ExperimentStore
event core.EventStore
}
// NewService returns an experiment service instance.
func NewService(
conf *config.ChaosDashboardConfig,
cli client.Client,
archive core.ExperimentStore,
event core.EventStore,
) *Service {
return &Service{
conf: conf,
kubeCli: cli,
archive: archive,
event: event,
}
}
// Register mounts our HTTP handler on the mux.
func Register(r *gin.RouterGroup, s *Service) {
endpoint := r.Group("/experiments")
// TODO: add more api handlers
endpoint.GET("", s.listExperiments)
endpoint.POST("/new", s.createExperiment)
endpoint.GET("/detail/:uid", s.getExperimentDetail)
endpoint.DELETE("/:uid", s.deleteExperiment)
endpoint.PUT("/update", s.updateExperiment)
endpoint.PUT("/pause/:uid", s.pauseExperiment)
endpoint.PUT("/start/:uid", s.startExperiment)
endpoint.GET("/state", s.state)
}
// Experiment defines the basic information of an experiment
type Experiment struct {
ExperimentBase
Created string `json:"created"`
Status string `json:"status"`
UID string `json:"uid"`
}
// ExperimentBase is used to identify the unique experiment from API request.
type ExperimentBase struct {
Kind string `uri:"kind" binding:"required,oneof=PodChaos NetworkChaos IoChaos StressChaos TimeChaos KernelChaos" json:"kind"`
Namespace string `uri:"namespace" binding:"required,NameValid" json:"namespace"`
Name string `uri:"name" binding:"required,NameValid" json:"name"`
}
// ExperimentDetail represents an experiment instance.
type ExperimentDetail struct {
Experiment
ExperimentInfo core.ExperimentInfo `json:"experiment_info"`
}
type actionFunc func(info *core.ExperimentInfo) error
// @Summary Create a new chaos experiment.
// @Description Create a new chaos experiment.
// @Tags experiments
// @Produce json
// @Param request body core.ExperimentInfo true "Request body"
// @Success 200 "create ok"
// @Failure 400 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /api/experiments/new [post]
func (s *Service) createExperiment(c *gin.Context) {
exp := &core.ExperimentInfo{}
if err := c.ShouldBindJSON(exp); err != nil {
c.Status(http.StatusBadRequest)
_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(err))
return
}
createFuncs := map[string]actionFunc{
v1alpha1.KindPodChaos: s.createPodChaos,
v1alpha1.KindNetworkChaos: s.createNetworkChaos,
v1alpha1.KindIOChaos: s.createIOChaos,
v1alpha1.KindStressChaos: s.createStressChaos,
v1alpha1.KindTimeChaos: s.createTimeChaos,
v1alpha1.KindKernelChaos: s.createKernelChaos,
}
f, ok := createFuncs[exp.Target.Kind]
if !ok {
c.Status(http.StatusBadRequest)
_ = c.Error(utils.ErrInvalidRequest.New(exp.Target.Kind + " is not supported"))
return
}
if err := f(exp); err != nil {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
return
}
c.JSON(http.StatusOK, nil)
}
func (s *Service) createPodChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.PodChaos{
ObjectMeta: v1.ObjectMeta{
Name: exp.Name,
Namespace: exp.Namespace,
Labels: exp.Labels,
Annotations: exp.Annotations,
},
Spec: v1alpha1.PodChaosSpec{
Selector: exp.Scope.ParseSelector(),
Action: v1alpha1.PodChaosAction(exp.Target.PodChaos.Action),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
ContainerName: exp.Target.PodChaos.ContainerName,
},
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
return s.kubeCli.Create(context.Background(), chaos)
}
func (s *Service) createNetworkChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.NetworkChaos{
ObjectMeta: v1.ObjectMeta{
Name: exp.Name,
Namespace: exp.Namespace,
Labels: exp.Labels,
Annotations: exp.Annotations,
},
Spec: v1alpha1.NetworkChaosSpec{
Selector: exp.Scope.ParseSelector(),
Action: v1alpha1.NetworkChaosAction(exp.Target.NetworkChaos.Action),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
Delay: exp.Target.NetworkChaos.Delay,
Loss: exp.Target.NetworkChaos.Loss,
Duplicate: exp.Target.NetworkChaos.Duplicate,
Corrupt: exp.Target.NetworkChaos.Corrupt,
},
}
if exp.Target.NetworkChaos.Action == string(v1alpha1.BandwidthAction) {
chaos.Spec.Bandwidth = exp.Target.NetworkChaos.Bandwidth
chaos.Spec.Direction = v1alpha1.Direction(exp.Target.NetworkChaos.Direction)
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
if exp.Target.NetworkChaos.TargetScope != nil {
chaos.Spec.Target = &v1alpha1.Target{
TargetSelector: exp.Target.NetworkChaos.TargetScope.ParseSelector(),
TargetMode: v1alpha1.PodMode(exp.Target.NetworkChaos.TargetScope.Mode),
TargetValue: exp.Target.NetworkChaos.TargetScope.Value,
}
}
return s.kubeCli.Create(context.Background(), chaos)
}
func (s *Service) createIOChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.IoChaos{
ObjectMeta: v1.ObjectMeta{
Name: exp.Name,
Namespace: exp.Namespace,
Labels: exp.Labels,
Annotations: exp.Annotations,
},
Spec: v1alpha1.IoChaosSpec{
Selector: exp.Scope.ParseSelector(),
Action: v1alpha1.IOChaosAction(exp.Target.IOChaos.Action),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
// TODO: don't hardcode after we support other layers
Layer: v1alpha1.FileSystemLayer,
Addr: exp.Target.IOChaos.Addr,
Delay: exp.Target.IOChaos.Delay,
Errno: exp.Target.IOChaos.Errno,
Path: exp.Target.IOChaos.Path,
Percent: exp.Target.IOChaos.Percent,
Methods: exp.Target.IOChaos.Methods,
},
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
return s.kubeCli.Create(context.Background(), chaos)
}
func (s *Service) createTimeChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.TimeChaos{
ObjectMeta: v1.ObjectMeta{
Name: exp.Name,
Namespace: exp.Namespace,
Labels: exp.Labels,
Annotations: exp.Annotations,
},
Spec: v1alpha1.TimeChaosSpec{
Selector: exp.Scope.ParseSelector(),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
TimeOffset: exp.Target.TimeChaos.TimeOffset,
ClockIds: exp.Target.TimeChaos.ClockIDs,
ContainerNames: exp.Target.TimeChaos.ContainerNames,
},
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
return s.kubeCli.Create(context.Background(), chaos)
}
func (s *Service) createKernelChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.KernelChaos{
ObjectMeta: v1.ObjectMeta{
Name: exp.Name,
Namespace: exp.Namespace,
Labels: exp.Labels,
Annotations: exp.Annotations,
},
Spec: v1alpha1.KernelChaosSpec{
Selector: exp.Scope.ParseSelector(),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
FailKernRequest: exp.Target.KernelChaos.FailKernRequest,
},
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
return s.kubeCli.Create(context.Background(), chaos)
}
func (s *Service) createStressChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.StressChaos{
ObjectMeta: v1.ObjectMeta{
Name: exp.Name,
Namespace: exp.Namespace,
Labels: exp.Labels,
Annotations: exp.Annotations,
},
Spec: v1alpha1.StressChaosSpec{
Selector: exp.Scope.ParseSelector(),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
Stressors: exp.Target.StressChaos.Stressors,
StressngStressors: exp.Target.StressChaos.StressngStressors,
},
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
return s.kubeCli.Create(context.Background(), chaos)
}
func (s *Service) getPodChaosDetail(namespace string, name string) (ExperimentDetail, error) {
chaos := &v1alpha1.PodChaos{}
ctx := context.TODO()
chaosKey := types.NamespacedName{Namespace: namespace, Name: name}
if err := s.kubeCli.Get(ctx, chaosKey, chaos); err != nil {
if apierrors.IsNotFound(err) {
return ExperimentDetail{}, utils.ErrNotFound.NewWithNoMessage()
}
return ExperimentDetail{}, err
}
info := core.ExperimentInfo{
Name: chaos.Name,
Namespace: chaos.Namespace,
Labels: chaos.Labels,
Annotations: chaos.Annotations,
Scope: core.ScopeInfo{
SelectorInfo: core.SelectorInfo{
NamespaceSelectors: chaos.Spec.Selector.Namespaces,
LabelSelectors: chaos.Spec.Selector.LabelSelectors,
AnnotationSelectors: chaos.Spec.Selector.AnnotationSelectors,
FieldSelectors: chaos.Spec.Selector.FieldSelectors,
PhaseSelector: chaos.Spec.Selector.PodPhaseSelectors,
Pods: chaos.Spec.Selector.Pods,
},
Mode: string(chaos.Spec.Mode),
Value: chaos.Spec.Value,
},
Target: core.TargetInfo{
Kind: v1alpha1.KindPodChaos,
PodChaos: &core.PodChaosInfo{
Action: string(chaos.Spec.Action),
ContainerName: chaos.Spec.ContainerName,
},
},
}
if chaos.Spec.Scheduler != nil {
info.Scheduler.Cron = chaos.Spec.Scheduler.Cron
}
if chaos.Spec.Duration != nil {
info.Scheduler.Duration = *chaos.Spec.Duration
}
return ExperimentDetail{
Experiment: Experiment{
ExperimentBase: ExperimentBase{
Kind: chaos.Kind,
Namespace: chaos.Namespace,
Name: chaos.Name,
},
Created: chaos.GetChaos().StartTime.Format(time.RFC3339),
Status: chaos.GetChaos().Status,
UID: chaos.GetChaos().UID,
},
ExperimentInfo: info,
}, nil
}
func (s *Service) getIoChaosDetail(namespace string, name string) (ExperimentDetail, error) {
chaos := &v1alpha1.IoChaos{}
ctx := context.TODO()
chaosKey := types.NamespacedName{Namespace: namespace, Name: name}
if err := s.kubeCli.Get(ctx, chaosKey, chaos); err != nil {
if apierrors.IsNotFound(err) {
return ExperimentDetail{}, utils.ErrNotFound.NewWithNoMessage()
}
return ExperimentDetail{}, err
}
info := core.ExperimentInfo{
Name: chaos.Name,
Namespace: chaos.Namespace,
Labels: chaos.Labels,
Annotations: chaos.Annotations,
Scope: core.ScopeInfo{
SelectorInfo: core.SelectorInfo{
NamespaceSelectors: chaos.Spec.Selector.Namespaces,
LabelSelectors: chaos.Spec.Selector.LabelSelectors,
AnnotationSelectors: chaos.Spec.Selector.AnnotationSelectors,
FieldSelectors: chaos.Spec.Selector.FieldSelectors,
PhaseSelector: chaos.Spec.Selector.PodPhaseSelectors,
Pods: chaos.Spec.Selector.Pods,
},
Mode: string(chaos.Spec.Mode),
Value: chaos.Spec.Value,
},
Target: core.TargetInfo{
Kind: v1alpha1.KindIOChaos,
IOChaos: &core.IOChaosInfo{
Action: string(chaos.Spec.Action),
Addr: chaos.Spec.Addr,
Delay: chaos.Spec.Delay,
Errno: chaos.Spec.Errno,
Path: chaos.Spec.Path,
Percent: chaos.Spec.Percent,
Methods: chaos.Spec.Methods,
},
},
}
if chaos.Spec.Scheduler != nil {
info.Scheduler.Cron = chaos.Spec.Scheduler.Cron
}
if chaos.Spec.Duration != nil {
info.Scheduler.Duration = *chaos.Spec.Duration
}
return ExperimentDetail{
Experiment: Experiment{
ExperimentBase: ExperimentBase{
Kind: chaos.Kind,
Namespace: chaos.Namespace,
Name: chaos.Name,
},
Created: chaos.GetChaos().StartTime.Format(time.RFC3339),
Status: chaos.GetChaos().Status,
UID: chaos.GetChaos().UID,
},
ExperimentInfo: info,
}, nil
}
func (s *Service) getNetworkChaosDetail(namespace string, name string) (ExperimentDetail, error) {
chaos := &v1alpha1.NetworkChaos{}
ctx := context.TODO()
chaosKey := types.NamespacedName{Namespace: namespace, Name: name}
if err := s.kubeCli.Get(ctx, chaosKey, chaos); err != nil {
if apierrors.IsNotFound(err) {
return ExperimentDetail{}, utils.ErrNotFound.NewWithNoMessage()
}
return ExperimentDetail{}, err
}
info := core.ExperimentInfo{
Name: chaos.Name,
Namespace: chaos.Namespace,
Labels: chaos.Labels,
Annotations: chaos.Annotations,
Scope: core.ScopeInfo{
SelectorInfo: core.SelectorInfo{
NamespaceSelectors: chaos.Spec.Selector.Namespaces,
LabelSelectors: chaos.Spec.Selector.LabelSelectors,
AnnotationSelectors: chaos.Spec.Selector.AnnotationSelectors,
FieldSelectors: chaos.Spec.Selector.FieldSelectors,
PhaseSelector: chaos.Spec.Selector.PodPhaseSelectors,
Pods: chaos.Spec.Selector.Pods,
},
Mode: string(chaos.Spec.Mode),
Value: chaos.Spec.Value,
},
Target: core.TargetInfo{
Kind: v1alpha1.KindNetworkChaos,
NetworkChaos: &core.NetworkChaosInfo{
Action: string(chaos.Spec.Action),
Delay: chaos.Spec.Delay,
Loss: chaos.Spec.Loss,
Duplicate: chaos.Spec.Duplicate,
Corrupt: chaos.Spec.Corrupt,
Bandwidth: chaos.Spec.Bandwidth,
Direction: string(chaos.Spec.Direction),
TargetScope: &core.ScopeInfo{
SelectorInfo: core.SelectorInfo{
NamespaceSelectors: chaos.Spec.Selector.Namespaces,
LabelSelectors: chaos.Spec.Selector.LabelSelectors,
AnnotationSelectors: chaos.Spec.Selector.AnnotationSelectors,
FieldSelectors: chaos.Spec.Selector.FieldSelectors,
PhaseSelector: chaos.Spec.Selector.PodPhaseSelectors,
},
},
},
},
}
if chaos.Spec.Scheduler != nil {
info.Scheduler.Cron = chaos.Spec.Scheduler.Cron
}
if chaos.Spec.Duration != nil {
info.Scheduler.Duration = *chaos.Spec.Duration
}
if chaos.Spec.Target != nil {
info.Target.NetworkChaos.TargetScope.Mode = string(chaos.Spec.Target.TargetMode)
info.Target.NetworkChaos.TargetScope.Value = chaos.Spec.Target.TargetValue
}
return ExperimentDetail{
Experiment: Experiment{
ExperimentBase: ExperimentBase{
Kind: chaos.Kind,
Namespace: chaos.Namespace,
Name: chaos.Name,
},
Created: chaos.GetChaos().StartTime.Format(time.RFC3339),
Status: chaos.GetChaos().Status,
UID: chaos.GetChaos().UID,
},
ExperimentInfo: info,
}, nil
}
func (s *Service) getTimeChaosDetail(namespace string, name string) (ExperimentDetail, error) {
chaos := &v1alpha1.TimeChaos{}
ctx := context.TODO()
chaosKey := types.NamespacedName{Namespace: namespace, Name: name}
if err := s.kubeCli.Get(ctx, chaosKey, chaos); err != nil {
if apierrors.IsNotFound(err) {
return ExperimentDetail{}, utils.ErrNotFound.NewWithNoMessage()
}
return ExperimentDetail{}, err
}
info := core.ExperimentInfo{
Name: chaos.Name,
Namespace: chaos.Namespace,
Labels: chaos.Labels,
Annotations: chaos.Annotations,
Scope: core.ScopeInfo{
SelectorInfo: core.SelectorInfo{
NamespaceSelectors: chaos.Spec.Selector.Namespaces,
LabelSelectors: chaos.Spec.Selector.LabelSelectors,
AnnotationSelectors: chaos.Spec.Selector.AnnotationSelectors,
FieldSelectors: chaos.Spec.Selector.FieldSelectors,
PhaseSelector: chaos.Spec.Selector.PodPhaseSelectors,
Pods: chaos.Spec.Selector.Pods,
},
Mode: string(chaos.Spec.Mode),
Value: chaos.Spec.Value,
},
Target: core.TargetInfo{
Kind: v1alpha1.KindTimeChaos,
TimeChaos: &core.TimeChaosInfo{
TimeOffset: chaos.Spec.TimeOffset,
ClockIDs: chaos.Spec.ClockIds,
ContainerNames: chaos.Spec.ContainerNames,
},
},
}
if chaos.Spec.Scheduler != nil {
info.Scheduler.Cron = chaos.Spec.Scheduler.Cron
}
if chaos.Spec.Duration != nil {
info.Scheduler.Duration = *chaos.Spec.Duration
}
return ExperimentDetail{
Experiment: Experiment{
ExperimentBase: ExperimentBase{
Kind: chaos.Kind,
Namespace: chaos.Namespace,
Name: chaos.Name,
},
Created: chaos.GetChaos().StartTime.Format(time.RFC3339),
Status: chaos.GetChaos().Status,
UID: chaos.GetChaos().UID,
},
ExperimentInfo: info,
}, nil
}
func (s *Service) getKernelChaosDetail(namespace string, name string) (ExperimentDetail, error) {
chaos := &v1alpha1.KernelChaos{}
ctx := context.TODO()
chaosKey := types.NamespacedName{Namespace: namespace, Name: name}
if err := s.kubeCli.Get(ctx, chaosKey, chaos); err != nil {
if apierrors.IsNotFound(err) {
return ExperimentDetail{}, utils.ErrNotFound.NewWithNoMessage()
}
return ExperimentDetail{}, err
}
info := core.ExperimentInfo{
Name: chaos.Name,
Namespace: chaos.Namespace,
Labels: chaos.Labels,
Annotations: chaos.Annotations,
Scope: core.ScopeInfo{
SelectorInfo: core.SelectorInfo{
NamespaceSelectors: chaos.Spec.Selector.Namespaces,
LabelSelectors: chaos.Spec.Selector.LabelSelectors,
AnnotationSelectors: chaos.Spec.Selector.AnnotationSelectors,
FieldSelectors: chaos.Spec.Selector.FieldSelectors,
PhaseSelector: chaos.Spec.Selector.PodPhaseSelectors,
Pods: chaos.Spec.Selector.Pods,
},
Mode: string(chaos.Spec.Mode),
Value: chaos.Spec.Value,
},
Target: core.TargetInfo{
Kind: v1alpha1.KindKernelChaos,
KernelChaos: &core.KernelChaosInfo{
FailKernRequest: chaos.Spec.FailKernRequest,
},
},
}
if chaos.Spec.Scheduler != nil {
info.Scheduler.Cron = chaos.Spec.Scheduler.Cron
}
if chaos.Spec.Duration != nil {
info.Scheduler.Duration = *chaos.Spec.Duration
}
return ExperimentDetail{
Experiment: Experiment{
ExperimentBase: ExperimentBase{
Kind: chaos.Kind,
Namespace: chaos.Namespace,
Name: chaos.Name,
},
Created: chaos.GetChaos().StartTime.Format(time.RFC3339),
Status: chaos.GetChaos().Status,
UID: chaos.GetChaos().UID,
},
ExperimentInfo: info,
}, nil
}
func (s *Service) getStressChaosDetail(namespace string, name string) (ExperimentDetail, error) {
chaos := &v1alpha1.StressChaos{}
ctx := context.TODO()
chaosKey := types.NamespacedName{Namespace: namespace, Name: name}
if err := s.kubeCli.Get(ctx, chaosKey, chaos); err != nil {
if apierrors.IsNotFound(err) {
return ExperimentDetail{}, utils.ErrNotFound.NewWithNoMessage()
}
return ExperimentDetail{}, err
}
info := core.ExperimentInfo{
Name: chaos.Name,
Namespace: chaos.Namespace,
Labels: chaos.Labels,
Annotations: chaos.Annotations,
Scope: core.ScopeInfo{
SelectorInfo: core.SelectorInfo{
NamespaceSelectors: chaos.Spec.Selector.Namespaces,
LabelSelectors: chaos.Spec.Selector.LabelSelectors,
AnnotationSelectors: chaos.Spec.Selector.AnnotationSelectors,
FieldSelectors: chaos.Spec.Selector.FieldSelectors,
PhaseSelector: chaos.Spec.Selector.PodPhaseSelectors,
Pods: chaos.Spec.Selector.Pods,
},
Mode: string(chaos.Spec.Mode),
Value: chaos.Spec.Value,
},
Target: core.TargetInfo{
Kind: v1alpha1.KindStressChaos,
StressChaos: &core.StressChaosInfo{
Stressors: chaos.Spec.Stressors,
StressngStressors: chaos.Spec.StressngStressors,
},
},
}
if chaos.Spec.Scheduler != nil {
info.Scheduler.Cron = chaos.Spec.Scheduler.Cron
}
if chaos.Spec.Duration != nil {
info.Scheduler.Duration = *chaos.Spec.Duration
}
return ExperimentDetail{
Experiment: Experiment{
ExperimentBase: ExperimentBase{
Kind: chaos.Kind,
Namespace: chaos.Namespace,
Name: chaos.Name,
},
Created: chaos.GetChaos().StartTime.Format(time.RFC3339),
Status: chaos.GetChaos().Status,
UID: chaos.GetChaos().UID,
},
ExperimentInfo: info,
}, nil
}
// @Summary Get chaos experiments from Kubernetes cluster.
// @Description Get chaos experiments from Kubernetes cluster.
// @Tags experiments
// @Produce json
// @Param namespace query string false "namespace"
// @Param name query string false "name"
// @Param kind query string false "kind" Enums(PodChaos, IoChaos, NetworkChaos, TimeChaos, KernelChaos, StressChaos)
// @Param status query string false "status" Enums(Running, Paused, Failed, Finished)
// @Success 200 {array} Experiment
// @Router /api/experiments [get]
// @Failure 500 {object} utils.APIError
func (s *Service) listExperiments(c *gin.Context) {
kind := c.Query("kind")
name := c.Query("name")
ns := c.Query("namespace")
status := c.Query("status")
data := make([]*Experiment, 0)
for key, list := range v1alpha1.AllKinds() {
if kind != "" && key != kind {
continue
}
if err := s.kubeCli.List(context.Background(), list.ChaosList, &client.ListOptions{Namespace: ns}); err != nil {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
return
}
for _, chaos := range list.ListChaos() {
if name != "" && chaos.Name != name {
continue
}
if status != "" && chaos.Status != status {
continue
}
data = append(data, &Experiment{
ExperimentBase: ExperimentBase{
Name: chaos.Name,
Namespace: chaos.Namespace,
Kind: chaos.Kind,
},
Created: chaos.StartTime.Format(time.RFC3339),
Status: chaos.Status,
UID: chaos.UID,
})
}
}
c.JSON(http.StatusOK, data)
}
// @Summary Get detailed information about the specified chaos experiment.
// @Description Get detailed information about the specified chaos experiment.
// @Tags experiments
// @Produce json
// @Param uid path string true "uid"
// @Router /api/experiments/detail/{uid} [GET]
// @Success 200 {object} ExperimentDetail
// @Failure 400 {object} utils.APIError
// @Failure 500 {object} utils.APIError
func (s *Service) getExperimentDetail(c *gin.Context) {
var (
err error
exp *core.ArchiveExperiment
expDetail ExperimentDetail
)
uid := c.Param("uid")
if exp, err = s.archive.FindByUID(context.Background(), uid); err != nil {
if !gorm.IsRecordNotFoundError(err) {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
} else {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInvalidRequest.New("the experiment is not found"))
}
}
kind := exp.Kind
ns := exp.Namespace
name := exp.Name
switch kind {
case v1alpha1.KindPodChaos:
expDetail, err = s.getPodChaosDetail(ns, name)
case v1alpha1.KindIOChaos:
expDetail, err = s.getIoChaosDetail(ns, name)
case v1alpha1.KindNetworkChaos:
expDetail, err = s.getNetworkChaosDetail(ns, name)
case v1alpha1.KindTimeChaos:
expDetail, err = s.getTimeChaosDetail(ns, name)
case v1alpha1.KindKernelChaos:
expDetail, err = s.getKernelChaosDetail(ns, name)
case v1alpha1.KindStressChaos:
expDetail, err = s.getStressChaosDetail(ns, name)
}
if err != nil {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
return
}
c.JSON(http.StatusOK, expDetail)
}
// @Summary Delete the specified chaos experiment.
// @Description Delete the specified chaos experiment.
// @Tags experiments
// @Produce json
// @Param uid path string true "uid"
// @Param force query string true "force" Enums(true, false)
// @Success 200 "delete ok"
// @Failure 400 {object} utils.APIError
// @Failure 404 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /api/experiments/{uid} [delete]
func (s *Service) deleteExperiment(c *gin.Context) {
var (
chaosKind *v1alpha1.ChaosKind
chaosMeta metav1.Object
ok bool
err error
exp *core.ArchiveExperiment
)
uid := c.Param("uid")
if exp, err = s.archive.FindByUID(context.Background(), uid); err != nil {
if !gorm.IsRecordNotFoundError(err) {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
} else {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInvalidRequest.New("the experiment is not found"))
}
}
kind := exp.Kind
ns := exp.Namespace
name := exp.Name
force := c.DefaultQuery("force", "false")
ctx := context.TODO()
chaosKey := types.NamespacedName{Namespace: ns, Name: name}
if chaosKind, ok = v1alpha1.AllKinds()[kind]; !ok {
c.Status(http.StatusBadRequest)
_ = c.Error(utils.ErrInvalidRequest.New(kind + " is not supported"))
return
}
if err := s.kubeCli.Get(ctx, chaosKey, chaosKind.Chaos); err != nil {
if apierrors.IsNotFound(err) {
c.Status(http.StatusNotFound)
_ = c.Error(utils.ErrNotFound.NewWithNoMessage())
} else {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
}
return
}
if force == "true" {
if chaosMeta, ok = chaosKind.Chaos.(metav1.Object); !ok {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("failed to get chaos meta information")))
return
}
annotations := chaosMeta.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
annotations[common.AnnotationCleanFinalizer] = common.AnnotationCleanFinalizerForced
chaosMeta.SetAnnotations(annotations)
if err := s.kubeCli.Update(context.Background(), chaosKind.Chaos); err != nil {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("forced deletion of chaos failed, because update chaos annotation error")))
return
}
}
if err := s.kubeCli.Delete(ctx, chaosKind.Chaos, &client.DeleteOptions{}); err != nil {
if apierrors.IsNotFound(err) {
c.Status(http.StatusNotFound)
_ = c.Error(utils.ErrNotFound.NewWithNoMessage())
} else {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
}
return
}
c.JSON(http.StatusOK, nil)
}
// @Summary Get chaos experiments state from Kubernetes cluster.
// @Description Get chaos experiments state from Kubernetes cluster.
// @Tags experiments
// @Produce json
// @Success 200 {object} ChaosState
// @Router /api/experiments/state [get]
// @Failure 500 {object} utils.APIError
func (s *Service) state(c *gin.Context) {
data := new(ChaosState)
g, ctx := errgroup.WithContext(context.Background())
m := &sync.Mutex{}
kinds := v1alpha1.AllKinds()
for index := range kinds {
list := kinds[index]
g.Go(func() error {
if err := s.kubeCli.List(ctx, list.ChaosList); err != nil {
return err
}
m.Lock()
for _, chaos := range list.ListChaos() {
switch chaos.Status {
case string(v1alpha1.ExperimentPhaseRunning):
data.Running++
case string(v1alpha1.ExperimentPhaseWaiting):
data.Waiting++
case string(v1alpha1.ExperimentPhasePaused):
data.Paused++
case string(v1alpha1.ExperimentPhaseFailed):
data.Failed++
case string(v1alpha1.ExperimentPhaseFinished):
data.Finished++
}
data.Total++
}
m.Unlock()
return nil
})
}
if err := g.Wait(); err != nil {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
return
}
c.JSON(http.StatusOK, data)
}
// @Summary Pause chaos experiment by API
// @Description Pause chaos experiment by API
// @Tags experiments
// @Produce json
// @Param uid path string true "uid"
// @Success 200 "pause ok"
// @Failure 400 {object} utils.APIError
// @Failure 404 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /api/experiments/pause/{uid} [put]
func (s *Service) pauseExperiment(c *gin.Context) {
var (
err error
experiment *core.ArchiveExperiment
)
uid := c.Param("uid")
if experiment, err = s.archive.FindByUID(context.Background(), uid); err != nil {
if !gorm.IsRecordNotFoundError(err) {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
} else {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInvalidRequest.New("the experiment is not found"))
}
}
exp := &ExperimentBase{
Kind: experiment.Kind,
Name: experiment.Name,
Namespace: experiment.Namespace,
}
annotations := map[string]string{
v1alpha1.PauseAnnotationKey: "true",
}
if err := s.patchExperiment(exp, annotations); err != nil {
if apierrors.IsNotFound(err) {
c.Status(http.StatusNotFound)
_ = c.Error(utils.ErrNotFound.WrapWithNoMessage(err))
return
}
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
return
}
c.JSON(http.StatusOK, nil)
}
// @Summary Start the paused chaos experiment by API
// @Description Start the paused chaos experiment by API
// @Tags experiments
// @Produce json
// @Param uid path string true "uid"
// @Success 200 "start ok"
// @Failure 400 {object} utils.APIError
// @Failure 404 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /api/experiments/start/{uid} [put]
func (s *Service) startExperiment(c *gin.Context) {
var (
err error
experiment *core.ArchiveExperiment
)
uid := c.Param("uid")
if experiment, err = s.archive.FindByUID(context.Background(), uid); err != nil {
if !gorm.IsRecordNotFoundError(err) {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
} else {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInvalidRequest.New("the experiment is not found"))
}
}
exp := &ExperimentBase{
Kind: experiment.Kind,
Name: experiment.Name,
Namespace: experiment.Namespace,
}
annotations := map[string]string{
v1alpha1.PauseAnnotationKey: "false",
}
if err := s.patchExperiment(exp, annotations); err != nil {
if apierrors.IsNotFound(err) {
c.Status(http.StatusNotFound)
_ = c.Error(utils.ErrNotFound.WrapWithNoMessage(err))
return
}
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
return
}
c.JSON(http.StatusOK, nil)
}
func (s *Service) patchExperiment(exp *ExperimentBase, annotations map[string]string) error {
var (
chaosKind *v1alpha1.ChaosKind
ok bool
)
if chaosKind, ok = v1alpha1.AllKinds()[exp.Kind]; !ok {
return fmt.Errorf("%s is not supported", exp.Kind)
}
key := types.NamespacedName{Namespace: exp.Namespace, Name: exp.Name}
if err := s.kubeCli.Get(context.Background(), key, chaosKind.Chaos); err != nil {
return err
}
var mergePatch []byte
mergePatch, _ = json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": annotations,
},
})
return s.kubeCli.Patch(context.Background(),
chaosKind.Chaos,
client.ConstantPatch(types.MergePatchType, mergePatch))
}
// @Summary Update the chaos experiment by API
// @Description Update the chaos experiment by API
// @Tags experiments
// @Produce json
// @Param request body core.ExperimentInfo true "Request body"
// @Success 200 "update ok"
// @Failure 400 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /api/experiments/update [put]
func (s *Service) updateExperiment(c *gin.Context) {
exp := &core.ExperimentInfo{}
if err := c.ShouldBindJSON(exp); err != nil {
c.Status(http.StatusBadRequest)
_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(err))
return
}
updateFuncs := map[string]actionFunc{
v1alpha1.KindPodChaos: s.updatePodChaos,
v1alpha1.KindNetworkChaos: s.updateNetworkChaos,
v1alpha1.KindIOChaos: s.updateIOChaos,
v1alpha1.KindStressChaos: s.updateStressChaos,
v1alpha1.KindTimeChaos: s.updateTimeChaos,
v1alpha1.KindKernelChaos: s.updateKernelChaos,
}
f, ok := updateFuncs[exp.Target.Kind]
if !ok {
c.Status(http.StatusBadRequest)
_ = c.Error(utils.ErrInvalidRequest.New(exp.Target.Kind + " is not supported"))
return
}
if err := f(exp); err != nil {
if apierrors.IsNotFound(err) {
c.Status(http.StatusNotFound)
_ = c.Error(utils.ErrNotFound.WrapWithNoMessage(err))
} else {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
}
return
}
c.JSON(http.StatusOK, nil)
}
func (s *Service) updatePodChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.PodChaos{}
key := types.NamespacedName{Namespace: exp.Namespace, Name: exp.Name}
if err := s.kubeCli.Get(context.Background(), key, chaos); err != nil {
return err
}
chaos.SetLabels(exp.Labels)
chaos.SetAnnotations(exp.Annotations)
chaos.Spec = v1alpha1.PodChaosSpec{
Selector: exp.Scope.ParseSelector(),
Action: v1alpha1.PodChaosAction(exp.Target.PodChaos.Action),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
ContainerName: exp.Target.PodChaos.ContainerName,
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
return s.kubeCli.Update(context.Background(), chaos)
}
func (s *Service) updateNetworkChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.NetworkChaos{}
key := types.NamespacedName{Namespace: exp.Namespace, Name: exp.Name}
if err := s.kubeCli.Get(context.Background(), key, chaos); err != nil {
return err
}
chaos.SetLabels(exp.Labels)
chaos.SetAnnotations(exp.Annotations)
chaos.Spec = v1alpha1.NetworkChaosSpec{
Selector: exp.Scope.ParseSelector(),
Action: v1alpha1.NetworkChaosAction(exp.Target.NetworkChaos.Action),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
Delay: exp.Target.NetworkChaos.Delay,
Loss: exp.Target.NetworkChaos.Loss,
Duplicate: exp.Target.NetworkChaos.Duplicate,
Corrupt: exp.Target.NetworkChaos.Corrupt,
Bandwidth: exp.Target.NetworkChaos.Bandwidth,
Direction: v1alpha1.Direction(exp.Target.NetworkChaos.Direction),
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
if exp.Target.NetworkChaos.TargetScope != nil {
chaos.Spec.Target = &v1alpha1.Target{
TargetSelector: exp.Target.NetworkChaos.TargetScope.ParseSelector(),
TargetMode: v1alpha1.PodMode(exp.Target.NetworkChaos.TargetScope.Mode),
TargetValue: exp.Target.NetworkChaos.TargetScope.Value,
}
}
return s.kubeCli.Update(context.Background(), chaos)
}
func (s *Service) updateIOChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.IoChaos{}
key := types.NamespacedName{Namespace: exp.Namespace, Name: exp.Name}
if err := s.kubeCli.Get(context.Background(), key, chaos); err != nil {
return err
}
chaos.SetLabels(exp.Labels)
chaos.SetAnnotations(exp.Annotations)
chaos.Spec = v1alpha1.IoChaosSpec{
Selector: exp.Scope.ParseSelector(),
Action: v1alpha1.IOChaosAction(exp.Target.IOChaos.Action),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
// TODO: don't hardcode after we support other layers
Layer: v1alpha1.FileSystemLayer,
Addr: exp.Target.IOChaos.Addr,
Delay: exp.Target.IOChaos.Delay,
Errno: exp.Target.IOChaos.Errno,
Path: exp.Target.IOChaos.Path,
Percent: exp.Target.IOChaos.Percent,
Methods: exp.Target.IOChaos.Methods,
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
return s.kubeCli.Update(context.Background(), chaos)
}
func (s *Service) updateKernelChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.KernelChaos{}
key := types.NamespacedName{Namespace: exp.Namespace, Name: exp.Name}
if err := s.kubeCli.Get(context.Background(), key, chaos); err != nil {
return err
}
chaos.SetLabels(exp.Labels)
chaos.SetAnnotations(exp.Annotations)
chaos.Spec = v1alpha1.KernelChaosSpec{
Selector: exp.Scope.ParseSelector(),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
FailKernRequest: exp.Target.KernelChaos.FailKernRequest,
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
return s.kubeCli.Update(context.Background(), chaos)
}
func (s *Service) updateTimeChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.TimeChaos{}
key := types.NamespacedName{Namespace: exp.Namespace, Name: exp.Name}
if err := s.kubeCli.Get(context.Background(), key, chaos); err != nil {
return err
}
chaos.SetLabels(exp.Labels)
chaos.SetAnnotations(exp.Annotations)
chaos.Spec = v1alpha1.TimeChaosSpec{
Selector: exp.Scope.ParseSelector(),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
TimeOffset: exp.Target.TimeChaos.TimeOffset,
ClockIds: exp.Target.TimeChaos.ClockIDs,
ContainerNames: exp.Target.TimeChaos.ContainerNames,
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
return s.kubeCli.Update(context.Background(), chaos)
}
func (s *Service) updateStressChaos(exp *core.ExperimentInfo) error {
chaos := &v1alpha1.StressChaos{}
key := types.NamespacedName{Namespace: exp.Namespace, Name: exp.Name}
if err := s.kubeCli.Get(context.Background(), key, chaos); err != nil {
return err
}
chaos.SetLabels(exp.Labels)
chaos.SetAnnotations(exp.Annotations)
chaos.Spec = v1alpha1.StressChaosSpec{
Selector: exp.Scope.ParseSelector(),
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
Stressors: exp.Target.StressChaos.Stressors,
StressngStressors: exp.Target.StressChaos.StressngStressors,
}
if exp.Scheduler.Cron != "" {
chaos.Spec.Scheduler = &v1alpha1.SchedulerSpec{Cron: exp.Scheduler.Cron}
}
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
}
return s.kubeCli.Create(context.Background(), chaos)
}
| 1 | 16,513 | Do we need to check if `exp.Target.StressChaos` is `nil`? | chaos-mesh-chaos-mesh | go |
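The review question in this record asks whether `exp.Target.StressChaos` should be nil-checked before the patched lines dereference it for `ContainerName`. Below is a minimal, self-contained sketch of that guard pattern; the stub types only stand in for the project's `core` structs and this is not the change that was actually merged. Note that `createStressChaos` already dereferences `exp.Target.StressChaos` for `Stressors`, so whether the extra guard is needed also depends on how the request body is validated upstream.

```go
package main

import "fmt"

// Hypothetical stand-ins for core.StressChaosInfo / core.TargetInfo,
// used only to make the nil-guard pattern concrete and runnable.
type StressChaosInfo struct {
	ContainerName *string
}
type TargetInfo struct {
	StressChaos *StressChaosInfo
}

func main() {
	target := TargetInfo{} // StressChaos left nil, the case the reviewer is worried about

	// Guard both the struct pointer and the optional field before dereferencing.
	if target.StressChaos != nil && target.StressChaos.ContainerName != nil {
		fmt.Println("container:", *target.StressChaos.ContainerName)
	} else {
		fmt.Println("no container name set")
	}
}
```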
@@ -1,4 +1,4 @@
-//snippet-sourcedescription:[CreateAccessKey.java demonstrates how to create an access key for an AWS Identity and Access Management (IAM) user.]
+//snippet-sourcedescription:[CreateAccessKey.java demonstrates how to create an access key for an AWS Identity and Access Management (AWS IAM) user.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM] | 1 | //snippet-sourcedescription:[CreateAccessKey.java demonstrates how to create an access key for an AWS Identity and Access Management (IAM) user.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM]
//snippet-sourcetype:[full-example]
//snippet-sourcedate:[11/02/2020]
//snippet-sourceauthor:[scmacdon-aws]
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package com.example.iam;
// snippet-start:[iam.java2.create_access_key.import]
import software.amazon.awssdk.services.iam.model.CreateAccessKeyRequest;
import software.amazon.awssdk.services.iam.model.CreateAccessKeyResponse;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.iam.IamClient;
import software.amazon.awssdk.services.iam.model.IamException;
// snippet-end:[iam.java2.create_access_key.import]
/**
* Creates an access key for an IAM user.
*/
public class CreateAccessKey {
public static void main(String[] args) {
final String USAGE = "\n" +
"Usage:\n" +
" CreateAccessKey <user> \n\n" +
"Where:\n" +
" user - an IAM user that you can obtain from the AWS Console.\n\n";
if (args.length != 1) {
System.out.println(USAGE);
System.exit(1);
}
// Read the command line argument
String user = args[0];
Region region = Region.AWS_GLOBAL;
IamClient iam = IamClient
.builder()
.region(region)
.build();
String keyId = createIAMAccessKey(iam, user);
System.out.println("The Key Id is " +keyId);
iam.close();
}
// snippet-start:[iam.java2.create_access_key.main]
public static String createIAMAccessKey(IamClient iam,String user) {
try {
CreateAccessKeyRequest request = CreateAccessKeyRequest.builder()
.userName(user).build();
CreateAccessKeyResponse response = iam.createAccessKey(request);
String keyId = response.accessKey().accessKeyId();
return keyId;
} catch (IamException e) {
System.err.println(e.awsErrorDetails().errorMessage());
System.exit(1);
}
return "";
}
// snippet-end:[iam.java2.create_access_key.main]
}
| 1 | 18,231 | AWS Identity and Access Management (IAM) | awsdocs-aws-doc-sdk-examples | rb |
@@ -110,7 +110,6 @@ spec:
run:
tasks:
- cstor-volume-create-getstorageclass-default
- - cstor-volume-create-getpvc-default
- cstor-volume-create-listclonecstorvolumereplicacr-default
- cstor-volume-create-listcstorpoolcr-default
- cstor-volume-create-puttargetservice-default | 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO
// Rename this file by removing the version suffix information
package v1alpha1
const cstorVolumeYamls = `
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-create-default
spec:
defaultConfig:
- name: VolumeControllerImage
value: {{env "OPENEBS_IO_CSTOR_VOLUME_MGMT_IMAGE" | default "openebs/cstor-volume-mgmt:latest"}}
- name: VolumeTargetImage
value: {{env "OPENEBS_IO_CSTOR_TARGET_IMAGE" | default "openebs/cstor-istgt:latest"}}
- name: VolumeMonitorImage
value: {{env "OPENEBS_IO_VOLUME_MONITOR_IMAGE" | default "openebs/m-exporter:latest"}}
- name: ReplicaCount
value: "3"
# Target Dir is a hostPath directory for target pod
- name: TargetDir
value: {{env "OPENEBS_IO_CSTOR_TARGET_DIR" | default "/var/openebs"}}
# TargetResourceRequests allow you to specify resource requests that need to be available
# before scheduling the containers. If not specified, the default is to use the limits
# from TargetResourceLimits or the default requests set in the cluster.
- name: TargetResourceRequests
value: "none"
# TargetResourceLimits allow you to set the limits on memory and cpu for target pods
# The resource and limit value should be in the same format as expected by
# Kubernetes. Example:
#- name: TargetResourceLimits
# value: |-
# memory: 1Gi
# cpu: 200m
# By default, the resource limits are disabled.
- name: TargetResourceLimits
value: "none"
# AuxResourceRequests allow you to set requests on side cars. Requests have to be specified
# in the format expected by Kubernetes
- name: AuxResourceRequests
value: "none"
# AuxResourceLimits allow you to set limits on side cars. Limits have to be specified
# in the format expected by Kubernetes
- name: AuxResourceLimits
value: "none"
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
# ServiceAccountName is the account name assigned to volume management pod
# with permissions to view, create, edit, delete required custom resources
- name: ServiceAccountName
value: {{env "OPENEBS_SERVICE_ACCOUNT"}}
# FSType specifies the format type that Kubernetes should use to
# mount the Persistent Volume. Note that there are no validations
# done to check the validity of the FsType
- name: FSType
value: "ext4"
# Lun specifies the lun number with which Kubernetes should login
# to iSCSI Volume (i.e OpenEBS Persistent Volume)
- name: Lun
value: "0"
# ResyncInterval specifies duration after which a controller should
# resync the resource status
- name: ResyncInterval
value: "30"
# TargetNodeSelector allows you to specify the nodes where
# openebs targets have to be scheduled. To use this feature,
# the nodes should already be labeled with the key=value. For example:
# "kubectl label nodes <node-name> nodetype=storage"
# Note: It is recommended that node selector for replica specify
# nodes that have disks/ssds attached to them. Example:
#- name: TargetNodeSelector
# value: |-
# nodetype: storage
- name: TargetNodeSelector
value: "none"
# TargetTolerations allows you to specify the tolerations for target
# Example:
# - name: TargetTolerations
# value: |-
# t1:
# key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoSchedule"
# t2:
# key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoExecute"
- name: TargetTolerations
value: "none"
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-create-getstorageclass-default
- cstor-volume-create-getpvc-default
- cstor-volume-create-listclonecstorvolumereplicacr-default
- cstor-volume-create-listcstorpoolcr-default
- cstor-volume-create-puttargetservice-default
- cstor-volume-create-putcstorvolumecr-default
- cstor-volume-create-puttargetdeployment-default
- cstor-volume-create-putcstorvolumereplicacr-default
output: cstor-volume-create-output-default
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-delete-default
spec:
defaultConfig:
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-delete-listcstorvolumecr-default
- cstor-volume-delete-listtargetservice-default
- cstor-volume-delete-listtargetdeployment-default
- cstor-volume-delete-listcstorvolumereplicacr-default
- cstor-volume-delete-deletetargetservice-default
- cstor-volume-delete-deletetargetdeployment-default
- cstor-volume-delete-deletecstorvolumereplicacr-default
- cstor-volume-delete-deletecstorvolumecr-default
output: cstor-volume-delete-output-default
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-read-default
spec:
defaultConfig:
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-read-listtargetservice-default
- cstor-volume-read-listcstorvolumecr-default
- cstor-volume-read-listcstorvolumereplicacr-default
- cstor-volume-read-listtargetpod-default
output: cstor-volume-read-output-default
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-list-default
spec:
defaultConfig:
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-list-listtargetservice-default
- cstor-volume-list-listtargetpod-default
- cstor-volume-list-listcstorvolumereplicacr-default
- cstor-volume-list-listpv-default
output: cstor-volume-list-output-default
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-getpvc-default
spec:
meta: |
id: creategetpvc
apiVersion: v1
runNamespace: {{ .Volume.runNamespace }}
kind: PersistentVolumeClaim
objectName: {{ .Volume.pvc }}
action: get
post: |
{{- $hostName := jsonpath .JsonResult "{.metadata.annotations.volume\\.kubernetes\\.io/selected-node}" | trim | default "" -}}
{{- $hostName | saveAs "creategetpvc.hostName" .TaskResult | noop -}}
{{- $replicaAntiAffinity := jsonpath .JsonResult "{.metadata.labels.openebs\\.io/replica-anti-affinity}" | trim | default "" -}}
{{- $replicaAntiAffinity | saveAs "creategetpvc.replicaAntiAffinity" .TaskResult | noop -}}
{{- $preferredReplicaAntiAffinity := jsonpath .JsonResult "{.metadata.labels.openebs\\.io/preferred-replica-anti-affinity}" | trim | default "" -}}
{{- $preferredReplicaAntiAffinity | saveAs "creategetpvc.preferredReplicaAntiAffinity" .TaskResult | noop -}}
{{- $targetAffinity := jsonpath .JsonResult "{.metadata.labels.openebs\\.io/target-affinity}" | trim | default "none" -}}
{{- $targetAffinity | saveAs "creategetpvc.targetAffinity" .TaskResult | noop -}}
{{- $stsTargetAffinity := jsonpath .JsonResult "{.metadata.labels.openebs\\.io/sts-target-affinity}" | trim | default "none" -}}
{{- if ne $stsTargetAffinity "none" -}}
{{- $stsTargetAffinity | saveAs "stsTargetAffinity" .TaskResult | noop -}}
{{- end -}}
{{- if ne .TaskResult.stsTargetAffinity "none" -}}
{{- printf "%s-%s" .TaskResult.stsTargetAffinity ((splitList "-" .Volume.pvc) | last) | default "none" | saveAs "sts.applicationName" .TaskResult -}}
{{- end -}}
---
# This RunTask is meant to be run only during clone create requests.
# However, clone & volume creation follow the same CASTemplate specifications.
# As of today, RunTask can not be run based on conditions. Hence, it contains
# a logic which will list empty pools
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-listclonecstorvolumereplicacr-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
runNamespace: {{ .Config.RunNamespace.value }}
id: cvolcreatelistclonecvr
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
options: |-
{{- if ne $isClone "false" }}
labelSelector: openebs.io/persistent-volume={{ .Volume.sourceVolume }}
{{- else }}
labelSelector: openebs.io/ignore=false
{{- end }}
post: |
{{- $poolsList := jsonpath .JsonResult "{range .items[*]}pkey=pools,{@.metadata.labels.cstorpool\\.openebs\\.io/uid}={@.metadata.labels.cstorpool\\.openebs\\.io/name};{end}" | trim | default "" | splitListTrim ";" -}}
{{- $poolsList | saveAs "pl" .ListItems -}}
{{- $poolsList | keyMap "cvolPoolList" .ListItems | noop -}}
{{- $poolsNodeList := jsonpath .JsonResult "{range .items[*]}pkey=pools,{@.metadata.labels.cstorpool\\.openebs\\.io/uid}={@.metadata.annotations.cstorpool\\.openebs\\.io/hostname};{end}" | trim | default "" | splitList ";" -}}
{{- $poolsNodeList | keyMap "cvolPoolNodeList" .ListItems | noop -}}
---
# runTask to list cstor pools
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-listcstorpoolcr-default
spec:
meta: |
id: cvolcreatelistpool
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorPool
action: list
options: |-
labelSelector: openebs.io/storage-pool-claim={{ .Config.StoragePoolClaim.value }}
post: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
{{/*
If clone is not enabled then override changes of previous runtask
*/}}
{{- if eq $isClone "false" }}
{{- $replicaCount := int64 .Config.ReplicaCount.value | saveAs "rc" .ListItems -}}
{{- $poolsList := jsonpath .JsonResult "{range .items[*]}pkey=pools,{@.metadata.uid}={@.metadata.name};{end}" | trim | default "" | splitListTrim ";" -}}
{{- $poolsList | saveAs "pl" .ListItems -}}
{{- len $poolsList | gt $replicaCount | verifyErr "not enough pools available to create replicas" | saveAs "cvolcreatelistpool.verifyErr" .TaskResult | noop -}}
{{- $poolsList | keyMap "cvolPoolList" .ListItems | noop -}}
{{- $poolsNodeList := jsonpath .JsonResult "{range .items[*]}pkey=pools,{@.metadata.uid}={@.metadata.labels.kubernetes\\.io/hostname};{end}" | trim | default "" | splitList ";" -}}
{{- $poolsNodeList | keyMap "cvolPoolNodeList" .ListItems | noop -}}
{{- end }}
---
#runTask to get storageclass info
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-getstorageclass-default
spec:
meta: |
id: creategetsc
apiVersion: storage.k8s.io/v1
kind: StorageClass
objectName: {{ .Volume.storageclass }}
action: get
post: |
{{- $resourceVer := jsonpath .JsonResult "{.metadata.resourceVersion}" -}}
{{- trim $resourceVer | saveAs "creategetsc.storageClassVersion" .TaskResult | noop -}}
{{- $stsTargetAffinity := jsonpath .JsonResult "{.metadata.labels.openebs\\.io/sts-target-affinity}" | trim | default "none" -}}
{{- $stsTargetAffinity | saveAs "stsTargetAffinity" .TaskResult | noop -}}
---
# runTask to create cStor target service
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-puttargetservice-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
{{- $runNamespace := .Config.RunNamespace.value -}}
{{- $pvcServiceAccount := .Config.PVCServiceAccountName.value | default "" -}}
{{- if ne $pvcServiceAccount "" }}
runNamespace: {{ .Volume.runNamespace | saveAs "cvolcreateputsvc.derivedNS" .TaskResult }}
{{ else }}
runNamespace: {{ $runNamespace | saveAs "cvolcreateputsvc.derivedNS" .TaskResult }}
{{- end }}
apiVersion: v1
kind: Service
action: put
id: cvolcreateputsvc
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "cvolcreateputsvc.objectName" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.spec.clusterIP}" | trim | saveAs "cvolcreateputsvc.clusterIP" .TaskResult | noop -}}
task: |
apiVersion: v1
kind: Service
metadata:
annotations:
openebs.io/storage-class-ref: |
name: {{ .Volume.storageclass }}
resourceVersion: {{ .TaskResult.creategetsc.storageClassVersion }}
labels:
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
openebs.io/target-service: cstor-target-svc
openebs.io/storage-engine-type: cstor
openebs.io/cas-type: cstor
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
name: {{ .Volume.owner }}
spec:
ports:
- name: cstor-iscsi
port: 3260
protocol: TCP
targetPort: 3260
- name: cstor-grpc
port: 7777
protocol: TCP
targetPort: 7777
- name: mgmt
port: 6060
targetPort: 6060
protocol: TCP
- name: exporter
port: 9500
targetPort: 9500
protocol: TCP
selector:
app: cstor-volume-manager
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
---
# runTask to create cStorVolume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-putcstorvolumecr-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
runNamespace: {{ .TaskResult.cvolcreateputsvc.derivedNS }}
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
id: cvolcreateputvolume
action: put
post: |
{{- jsonpath .JsonResult "{.metadata.uid}" | trim | saveAs "cvolcreateputvolume.cstorid" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "cvolcreateputvolume.objectName" .TaskResult | noop -}}
task: |
{{- $replicaCount := .Config.ReplicaCount.value | int64 -}}
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
metadata:
name: {{ .Volume.owner }}
annotations:
openebs.io/fs-type: {{ .Config.FSType.value }}
openebs.io/lun: {{ .Config.Lun.value }}
openebs.io/storage-class-ref: |
name: {{ .Volume.storageclass }}
resourceVersion: {{ .TaskResult.creategetsc.storageClassVersion }}
{{- if ne $isClone "false" }}
openebs.io/snapshot: {{ .Volume.snapshotName }}
{{- end }}
labels:
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
{{- if ne $isClone "false" }}
openebs.io/source-volume: {{ .Volume.sourceVolume }}
{{- end }}
spec:
targetIP: {{ .TaskResult.cvolcreateputsvc.clusterIP }}
capacity: {{ .Volume.capacity }}
nodeBase: iqn.2016-09.com.openebs.cstor
iqn: iqn.2016-09.com.openebs.cstor:{{ .Volume.owner }}
targetPortal: {{ .TaskResult.cvolcreateputsvc.clusterIP }}:3260
targetPort: 3260
status: "Init"
replicationFactor: {{ $replicaCount }}
consistencyFactor: {{ div $replicaCount 2 | floor | add1 }}
desiredReplicationFactor: {{ $replicaCount }}
versionDetails:
status:
current: {{ .CAST.version }}
dependentsUpgraded: true
desired: {{ .CAST.version }}
autoUpgrade: false
---
# runTask to create cStor target deployment
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-puttargetdeployment-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
runNamespace: {{ .TaskResult.cvolcreateputsvc.derivedNS }}
apiVersion: apps/v1
kind: Deployment
action: put
id: cvolcreateputctrl
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "cvolcreateputctrl.objectName" .TaskResult | noop -}}
task: |
{{- $isMonitor := .Config.VolumeMonitorImage.enabled | default "true" | lower -}}
{{- $setResourceRequests := .Config.TargetResourceRequests.value | default "none" -}}
{{- $resourceRequestsVal := fromYaml .Config.TargetResourceRequests.value -}}
{{- $setResourceLimits := .Config.TargetResourceLimits.value | default "none" -}}
{{- $resourceLimitsVal := fromYaml .Config.TargetResourceLimits.value -}}
{{- $setAuxResourceRequests := .Config.AuxResourceRequests.value | default "none" -}}
{{- $auxResourceRequestsVal := fromYaml .Config.AuxResourceRequests.value -}}
{{- $setAuxResourceLimits := .Config.AuxResourceLimits.value | default "none" -}}
{{- $auxResourceLimitsVal := fromYaml .Config.AuxResourceLimits.value -}}
{{- $targetAffinityVal := .TaskResult.creategetpvc.targetAffinity -}}
{{- $hasNodeSelector := .Config.TargetNodeSelector.value | default "none" -}}
{{- $nodeSelectorVal := fromYaml .Config.TargetNodeSelector.value -}}
{{- $hasTargetToleration := .Config.TargetTolerations.value | default "none" -}}
{{- $targetTolerationVal := fromYaml .Config.TargetTolerations.value -}}
{{- $isQueueDepth := .Config.QueueDepth.value | default "" -}}
{{- $isLuworkers := .Config.Luworkers.value | default "" -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Volume.owner }}-target
labels:
app: cstor-volume-manager
openebs.io/storage-engine-type: cstor
openebs.io/cas-type: cstor
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
openebs.io/storage-pool-claim: {{ .Config.StoragePoolClaim.value }}
annotations:
{{- if eq $isMonitor "true" }}
openebs.io/volume-monitor: "true"
{{- end}}
openebs.io/volume-type: cstor
openebs.io/storage-class-ref: |
name: {{ .Volume.storageclass }}
resourceVersion: {{ .TaskResult.creategetsc.storageClassVersion }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: cstor-volume-manager
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
template:
metadata:
labels:
{{- if eq $isMonitor "true" }}
monitoring: volume_exporter_prometheus
{{- end}}
app: cstor-volume-manager
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/storage-class: {{ .Volume.storageclass }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
openebs.io/version: {{ .CAST.version }}
annotations:
openebs.io/storage-class-ref: |
name: {{ .Volume.storageclass }}
resourceVersion: {{ .TaskResult.creategetsc.storageClassVersion }}
{{- if eq $isMonitor "true" }}
prometheus.io/path: /metrics
prometheus.io/port: "9500"
prometheus.io/scrape: "true"
{{- end}}
spec:
{{- if ne $hasNodeSelector "none" }}
nodeSelector:
{{- range $sK, $sV := $nodeSelectorVal }}
{{ $sK }}: {{ $sV }}
{{- end }}
{{- end}}
serviceAccountName: {{ .Config.PVCServiceAccountName.value | default .Config.ServiceAccountName.value }}
{{- if ne (.TaskResult.sts.applicationName | default "") "" }}
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: statefulset.kubernetes.io/pod-name
operator: In
values:
- {{ .TaskResult.sts.applicationName }}
namespaces:
- {{ .Volume.runNamespace }}
topologyKey: kubernetes.io/hostname
{{- else if ne $targetAffinityVal "none" }}
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: openebs.io/target-affinity
operator: In
values:
- {{ $targetAffinityVal }}
topologyKey: kubernetes.io/hostname
namespaces: [{{.Volume.runNamespace}}]
{{- end }}
tolerations:
- effect: NoExecute
key: node.alpha.kubernetes.io/notReady
operator: Exists
tolerationSeconds: 30
- effect: NoExecute
key: node.alpha.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 30
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 30
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 30
{{- if ne $hasTargetToleration "none" }}
{{- range $k, $v := $targetTolerationVal }}
-
{{- range $kk, $vv := $v }}
{{ $kk }}: {{ $vv }}
{{- end }}
{{- end }}
{{- end }}
containers:
- image: {{ .Config.VolumeTargetImage.value }}
name: cstor-istgt
imagePullPolicy: IfNotPresent
resources:
{{- if ne $setResourceLimits "none" }}
limits:
{{- range $rKey, $rLimit := $resourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
{{- if ne $setResourceRequests "none" }}
requests:
{{- range $rKey, $rReq := $resourceRequestsVal }}
{{ $rKey }}: {{ $rReq }}
{{- end }}
{{- end }}
ports:
- containerPort: 3260
protocol: TCP
env:
{{- if ne $isQueueDepth "" }}
- name: QueueDepth
value: {{ .Config.QueueDepth.value }}
{{- end }}
{{- if ne $isLuworkers "" }}
- name: Luworkers
value: {{ .Config.Luworkers.value }}
{{- end }}
securityContext:
privileged: true
volumeMounts:
- name: sockfile
mountPath: /var/run
- name: conf
mountPath: /usr/local/etc/istgt
- name: tmp
mountPath: /tmp
mountPropagation: Bidirectional
{{- if eq $isMonitor "true" }}
- image: {{ .Config.VolumeMonitorImage.value }}
name: maya-volume-exporter
resources:
{{- if ne $setAuxResourceRequests "none" }}
requests:
{{- range $rKey, $rLimit := $auxResourceRequestsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
{{- if ne $setAuxResourceLimits "none" }}
limits:
{{- range $rKey, $rLimit := $auxResourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
args:
- "-e=cstor"
command: ["maya-exporter"]
ports:
- containerPort: 9500
protocol: TCP
volumeMounts:
- name: sockfile
mountPath: /var/run
- name: conf
mountPath: /usr/local/etc/istgt
{{- end }}
- name: cstor-volume-mgmt
image: {{ .Config.VolumeControllerImage.value }}
resources:
{{- if ne $setAuxResourceRequests "none" }}
requests:
{{- range $rKey, $rLimit := $auxResourceRequestsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
{{- if ne $setAuxResourceLimits "none" }}
limits:
{{- range $rKey, $rLimit := $auxResourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
env:
- name: OPENEBS_IO_CSTOR_VOLUME_ID
value: {{ .TaskResult.cvolcreateputvolume.cstorid }}
- name: RESYNC_INTERVAL
value: {{ .Config.ResyncInterval.value }}
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
securityContext:
privileged: true
volumeMounts:
- name: sockfile
mountPath: /var/run
- name: conf
mountPath: /usr/local/etc/istgt
- name: tmp
mountPath: /tmp
mountPropagation: Bidirectional
volumes:
- name: sockfile
emptyDir: {}
- name: conf
emptyDir: {}
- name: tmp
hostPath:
path: {{ .Config.TargetDir.value }}/shared-{{ .Volume.owner }}-target
type: DirectoryOrCreate
---
# runTask to create cStorVolumeReplica/(s)
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-putcstorvolumereplicacr-default
spec:
meta: |
apiVersion: openebs.io/v1alpha1
runNamespace: {{.Config.RunNamespace.value}}
kind: CStorVolumeReplica
action: put
id: cstorvolumecreatereplica
{{/*
Fetch all the cStorPool uids into a list.
Calculate the replica count.
Add as many poolUids to resources as the replica count requires.
*/}}
{{- $hostName := .TaskResult.creategetpvc.hostName -}}
{{- $replicaAntiAffinity := .TaskResult.creategetpvc.replicaAntiAffinity }}
{{- $preferredReplicaAntiAffinity := .TaskResult.creategetpvc.preferredReplicaAntiAffinity }}
{{- $antiAffinityLabelSelector := printf "openebs.io/replica-anti-affinity=%s" $replicaAntiAffinity | IfNotNil $replicaAntiAffinity }}
{{- $preferredAntiAffinityLabelSelector := printf "openebs.io/preferred-replica-anti-affinity=%s" $preferredReplicaAntiAffinity | IfNotNil $preferredReplicaAntiAffinity }}
{{- $preferedScheduleOnHostAnnotationSelector := printf "volume.kubernetes.io/selected-node=%s" $hostName | IfNotNil $hostName }}
{{- $selectionPolicies := cspGetPolicies $antiAffinityLabelSelector $preferredAntiAffinityLabelSelector $preferedScheduleOnHostAnnotationSelector }}
{{- $pools := createCSPListFromUIDNodeMap (getMapofString .ListItems.cvolPoolNodeList "pools") }}
{{- $poolUids := cspFilterPoolIDs $pools $selectionPolicies | randomize }}
{{- $replicaCount := .Config.ReplicaCount.value | int64 -}}
{{- if lt (len $poolUids) $replicaCount -}}
{{- printf "Not enough pools to provision replica: expected replica count %d actual count %d" $replicaCount (len $poolUids) | fail -}}
{{- end -}}
repeatWith:
resources:
{{- range $k, $v := $poolUids }}
{{- if lt $k $replicaCount }}
- {{ $v | quote }}
{{- end }}
{{- end }}
task: |
{{- $replicaAntiAffinity := .TaskResult.creategetpvc.replicaAntiAffinity -}}
{{- $preferredReplicaAntiAffinity := .TaskResult.creategetpvc.preferredReplicaAntiAffinity }}
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
{{- $zvolWorkers := .Config.ZvolWorkers.value | default "" -}}
kind: CStorVolumeReplica
apiVersion: openebs.io/v1alpha1
metadata:
{{/*
We pluck the cStorPool name from the map[uid]name:
{ "uid1":"name1","uid2":"name2","uid3":"name3" }
The .ListItems.currentRepeatResource gives us the uid of one
of the pools from the resources list
*/}}
name: {{ .Volume.owner }}-{{ pluck .ListItems.currentRepeatResource .ListItems.cvolPoolList.pools | first }}
finalizers: ["cstorvolumereplica.openebs.io/finalizer"]
labels:
cstorpool.openebs.io/name: {{ pluck .ListItems.currentRepeatResource .ListItems.cvolPoolList.pools | first }}
cstorpool.openebs.io/uid: {{ .ListItems.currentRepeatResource }}
cstorvolume.openebs.io/name: {{ .Volume.owner }}
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
{{- if ne $isClone "false" }}
openebs.io/cloned: true
{{- end }}
{{- if ne $replicaAntiAffinity "" }}
openebs.io/replica-anti-affinity: {{ .TaskResult.creategetpvc.replicaAntiAffinity }}
{{- end }}
{{- if ne $preferredReplicaAntiAffinity "" }}
openebs.io/preferred-replica-anti-affinity: {{ .TaskResult.creategetpvc.preferredReplicaAntiAffinity }}
{{- end }}
annotations:
{{- if ne $isClone "false" }}
openebs.io/snapshot: {{ .Volume.snapshotName }}
openebs.io/source-volume: {{ .Volume.sourceVolume }}
{{- end }}
cstorpool.openebs.io/hostname: {{ pluck .ListItems.currentRepeatResource .ListItems.cvolPoolNodeList.pools | first }}
isRestoreVol: {{ .Volume.isRestoreVol }}
openebs.io/storage-class-ref: |
name: {{ .Volume.storageclass }}
resourceVersion: {{ .TaskResult.creategetsc.storageClassVersion }}
spec:
capacity: {{ .Volume.capacity }}
targetIP: {{ .TaskResult.cvolcreateputsvc.clusterIP }}
{{- if ne $zvolWorkers "" }}
zvolWorkers: {{ .Config.ZvolWorkers.value }}
{{- end }}
status:
# phase would be updated by the appropriate target
phase: ""
versionDetails:
status:
current: {{ .CAST.version }}
dependentsUpgraded: true
desired: {{ .CAST.version }}
autoUpgrade: false
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | addTo "cstorvolumecreatereplica.objectName" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.metadata.spec.capacity}" | trim | saveAs "cstorvolumecreatereplica.capacity" .TaskResult | noop -}}
{{- $replicaPair := jsonpath .JsonResult "pkey=replicas,{@.metadata.name}={@.spec.capacity};" | trim | default "" | splitList ";" -}}
{{- $replicaPair | keyMap "replicaList" .ListItems | noop -}}
---
# runTask to render volume create output as CASVolume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-output-default
spec:
meta: |
action: output
id: cstorvolumeoutput
kind: CASVolume
apiVersion: v1alpha1
task: |
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
labels:
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
spec:
capacity: {{ .Volume.capacity }}
iqn: iqn.2016-09.com.openebs.cstor:{{ .Volume.owner }}
targetPortal: {{ .TaskResult.cvolcreateputsvc.clusterIP }}:3260
targetIP: {{ .TaskResult.cvolcreateputsvc.clusterIP }}
targetPort: 3260
replicas: {{ .ListItems.replicaList.replicas | len }}
casType: cstor
---
# runTask to list all cstor target deployment services
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-listtargetservice-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
id: listlistsvc
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: openebs.io/target-service=cstor-target-svc
post: |
{{/*
We create a "clusterIP"=xxxxx pair and save it for the corresponding volume.
The per-volume servicePair is identified by a unique "namespace/vol-name" key
*/}}
{{- $servicePairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.labels.openebs\\.io/persistent-volume},clusterIP={@.spec.clusterIP};{end}" | trim | default "" | splitList ";" -}}
{{- $servicePairs | keyMap "volumeList" .ListItems | noop -}}
---
#runTask to list all cstor pv
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-listpv-default
spec:
meta: |
id: listlistpv
apiVersion: v1
kind: PersistentVolume
action: list
options: |-
labelSelector: openebs.io/cas-type=cstor
post: |
{{- $pvPairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.name},accessModes={@.spec.accessModes[0]},storageClass={@.spec.storageClassName};{end}" | trim | default "" | splitList ";" -}}
{{- $pvPairs | keyMap "volumeList" .ListItems | noop -}}
---
# runTask to list all cstor target pods
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-listtargetpod-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
id: listlistctrl
apiVersion: v1
kind: Pod
action: list
options: |-
labelSelector: openebs.io/target=cstor-target
post: |
{{/*
We create a "targetIP"=xxxxx pair and save it for the corresponding volume.
The per-volume servicePair is identified by a unique "namespace/vol-name" key
*/}}
{{- $targetPairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.labels.openebs\\.io/persistent-volume},targetIP={@.status.podIP},namespace={@.metadata.namespace},targetStatus={@.status.containerStatuses[*].ready};{end}" | trim | default "" | splitList ";" -}}
{{- $targetPairs | keyMap "volumeList" .ListItems | noop -}}
---
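# runTask to list all cstorvolumereplica crs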
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-listcstorvolumereplicacr-default
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
id: listlistrep
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
post: |
{{- $replicaPairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.labels.openebs\\.io/persistent-volume},replicaName={@.metadata.name},capacity={@.spec.capacity};{end}" | trim | default "" | splitList ";" -}}
{{- $replicaPairs | keyMap "volumeList" .ListItems | noop -}}
---
# runTask to render volume list output
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-output-default
spec:
meta: |
id : listoutput
action: output
kind: CASVolumeList
apiVersion: v1alpha1
task: |
kind: CASVolumeList
items:
{{/*
We have a unique key for each volume in .ListItems.volumeList.
We iterate over it to extract various volume properties. These
properties were set in the preceding list tasks.
*/}}
{{- range $pkey, $map := .ListItems.volumeList }}
{{- $capacity := pluck "capacity" $map | first | default "" | splitList ", " | first }}
{{- $clusterIP := pluck "clusterIP" $map | first }}
{{- $targetStatus := pluck "targetStatus" $map | first }}
{{- $replicaName := pluck "replicaName" $map | first }}
{{- $namespace := pluck "namespace" $map | first }}
{{- $accessMode := pluck "accessModes" $map | first }}
{{- $storageClass := pluck "storageClass" $map | first }}
{{- $name := $pkey }}
- kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ $name }}
namespace: {{ $namespace }}
annotations:
openebs.io/storage-class: {{ $storageClass | default "" }}
openebs.io/cluster-ips: {{ $clusterIP }}
openebs.io/volume-size: {{ $capacity }}
openebs.io/controller-status: {{ $targetStatus | default "" | replace "true" "running" | replace "false" "notready" }}
spec:
capacity: {{ $capacity }}
iqn: iqn.2016-09.com.openebs.cstor:{{ $name }}
targetPortal: {{ $clusterIP }}:3260
targetIP: {{ $clusterIP }}
targetPort: 3260
replicas: {{ $replicaName | default "" | splitList ", " | len }}
casType: cstor
accessMode: {{ $accessMode | default "" }}
{{- end -}}
---
# runTask to list cStor target deployment service
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listtargetservice-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
{{- $runNamespace := .Config.RunNamespace.value -}}
{{- $pvcServiceAccount := .Config.PVCServiceAccountName.value | default "" -}}
{{- if ne $pvcServiceAccount "" }}
runNamespace: {{ .Volume.runNamespace | saveAs "readlistsvc.derivedNS" .TaskResult }}
{{ else }}
runNamespace: {{ $runNamespace | saveAs "readlistsvc.derivedNS" .TaskResult }}
{{- end }}
apiVersion: v1
id: readlistsvc
kind: Service
action: list
options: |-
labelSelector: openebs.io/target-service=cstor-target-svc,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistsvc.items" .TaskResult | noop -}}
{{- .TaskResult.readlistsvc.items | notFoundErr "target service not found" | saveIf "readlistsvc.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.clusterIP}" | trim | saveAs "readlistsvc.clusterIP" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.openebs\\.io/pvc-namespace}" | default "" | trim | saveAs "readlistsvc.pvcNs" .TaskResult | noop -}}
---
# runTask to list cstor volume cr
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listcstorvolumecr-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
runNamespace: {{ .TaskResult.readlistsvc.derivedNS }}
id: readlistcv
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistcv.names" .TaskResult | noop -}}
{{- .TaskResult.readlistcv.names | notFoundErr "cStor Volume CR not found" | saveIf "readlistcv.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.openebs\\.io/fs-type}" | trim | default "ext4" | saveAs "readlistcv.fsType" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.openebs\\.io/lun}" | trim | default "0" | int | saveAs "readlistcv.lun" .TaskResult | noop -}}
---
# runTask to list all replica crs of a volume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listcstorvolumereplicacr-default
spec:
meta: |
id: readlistrep
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistrep.items" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.cstorpool\\.openebs\\.io/hostname}" | trim | saveAs "readlistrep.hostname" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.labels.cstorpool\\.openebs\\.io/name}" | trim | saveAs "readlistrep.poolname" .TaskResult | noop -}}
{{- .TaskResult.readlistrep.items | notFoundErr "replicas not found" | saveIf "readlistrep.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.capacity}" | trim | saveAs "readlistrep.capacity" .TaskResult | noop -}}
---
# runTask to list cStor volume target pods
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listtargetpod-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
runNamespace: {{ .TaskResult.readlistsvc.derivedNS }}
apiVersion: v1
kind: Pod
action: list
id: readlistctrl
options: |-
labelSelector: openebs.io/target=cstor-target,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistctrl.items" .TaskResult | noop -}}
{{- .TaskResult.readlistctrl.items | notFoundErr "target pod not found" | saveIf "readlistctrl.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].status.podIP}" | trim | saveAs "readlistctrl.podIP" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.nodeName}" | trim | saveAs "readlistctrl.targetNodeName" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].status.containerStatuses[*].ready}" | trim | saveAs "readlistctrl.status" .TaskResult | noop -}}
---
# runTask to render output of read volume task as CAS Volume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-output-default
spec:
meta: |
id : readoutput
action: output
kind: CASVolume
apiVersion: v1alpha1
task: |
{{/* We calculate the capacity of the volume here. Pick up the capacity from the CVR */}}
{{- $capacity := .TaskResult.readlistrep.capacity | default "" | splitList " " | first -}}
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
{{/* Render other values into annotation */}}
annotations:
openebs.io/controller-ips: {{ .TaskResult.readlistctrl.podIP | default "" | splitList " " | first }}
openebs.io/controller-status: {{ .TaskResult.readlistctrl.status | default "" | splitList " " | join "," | replace "true" "running" | replace "false" "notready" }}
openebs.io/cvr-names: {{ .TaskResult.readlistrep.items | default "" | splitList " " | join "," }}
openebs.io/node-names: {{ .TaskResult.readlistrep.hostname | default "" | splitList " " | join "," }}
openebs.io/pool-names: {{ .TaskResult.readlistrep.poolname | default "" | splitList " " | join "," }}
openebs.io/controller-node-name: {{ .TaskResult.readlistctrl.targetNodeName | default ""}}
spec:
capacity: {{ $capacity }}
iqn: iqn.2016-09.com.openebs.cstor:{{ .Volume.owner }}
targetPortal: {{ .TaskResult.readlistsvc.clusterIP }}:3260
targetIP: {{ .TaskResult.readlistsvc.clusterIP }}
targetPort: 3260
lun: {{ .TaskResult.readlistcv.lun }}
fsType: {{ .TaskResult.readlistcv.fsType }}
replicas: {{ .TaskResult.readlistrep.capacity | default "" | splitList " " | len }}
casType: cstor
---
# runTask to list the cstorvolume that has to be deleted
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listcstorvolumecr-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
{{- $runNamespace := .Config.RunNamespace.value -}}
{{- $pvcServiceAccount := .Config.PVCServiceAccountName.value | default "" -}}
{{- if ne $pvcServiceAccount "" }}
runNamespace: {{ .Volume.runNamespace | saveAs "deletelistcsv.derivedNS" .TaskResult }}
{{ else }}
runNamespace: {{ $runNamespace | saveAs "deletelistcsv.derivedNS" .TaskResult }}
{{- end }}
id: deletelistcsv
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistcsv.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistcsv.names | notFoundErr "cstor volume not found" | saveIf "deletelistcsv.notFoundErr" .TaskResult | noop -}}
{{- .TaskResult.deletelistcsv.names | default "" | splitList " " | isLen 1 | not | verifyErr "total no. cstor volume is not 1" | saveIf "deletelistcsv.verifyErr" .TaskResult | noop -}}
---
# runTask to list target service of volume to delete
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listtargetservice-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
runNamespace: {{ .TaskResult.deletelistcsv.derivedNS }}
id: deletelistsvc
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: openebs.io/target-service=cstor-target-svc,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{/*
Save the name of the service. Error if service is missing or more
than one service exists
*/}}
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistsvc.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistsvc.names | notFoundErr "target service not found" | saveIf "deletelistsvc.notFoundErr" .TaskResult | noop -}}
{{- .TaskResult.deletelistsvc.names | default "" | splitList " " | isLen 1 | not | verifyErr "total no. of target services is not 1" | saveIf "deletelistsvc.verifyErr" .TaskResult | noop -}}
---
# runTask to list target deployment of volume to delete
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listtargetdeployment-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
runNamespace: {{ .TaskResult.deletelistcsv.derivedNS }}
id: deletelistctrl
apiVersion: apps/v1
kind: Deployment
action: list
options: |-
labelSelector: openebs.io/target=cstor-target,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistctrl.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistctrl.names | notFoundErr "target deployment not found" | saveIf "deletelistctrl.notFoundErr" .TaskResult | noop -}}
{{- .TaskResult.deletelistctrl.names | default "" | splitList " " | isLen 1 | not | verifyErr "total no. of target deployments is not 1" | saveIf "deletelistctrl.verifyErr" .TaskResult | noop -}}
---
# runTask to list cstorvolumereplica of volume to delete
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listcstorvolumereplicacr-default
spec:
meta: |
id: deletelistcvr
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{/*
List the names of the cstorvolumereplicas. Error if no
cstorvolumereplica is found; otherwise save the names to the map cvrlist
*/}}
{{- $cvrs := jsonpath .JsonResult "{range .items[*]}pkey=cvrs,{@.metadata.name}='';{end}" | trim | default "" | splitList ";" -}}
{{- $cvrs | notFoundErr "cstor volume replica not found" | saveIf "deletelistcvr.notFoundErr" .TaskResult | noop -}}
{{- $cvrs | keyMap "cvrlist" .ListItems | noop -}}
---
# runTask to delete cStor volume target service
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletetargetservice-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
runNamespace: {{ .TaskResult.deletelistcsv.derivedNS }}
id: deletedeletesvc
apiVersion: v1
kind: Service
action: delete
objectName: {{ .TaskResult.deletelistsvc.names }}
---
# runTask to delete cStor volume target deployment
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletetargetdeployment-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
runNamespace: {{ .TaskResult.deletelistcsv.derivedNS }}
id: deletedeletectrl
apiVersion: apps/v1
kind: Deployment
action: delete
objectName: {{ .TaskResult.deletelistctrl.names }}
---
# runTask to delete cstorvolumereplica
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletecstorvolumereplicacr-default
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
id: deletedeletecvr
action: delete
kind: CStorVolumeReplica
objectName: {{ keys .ListItems.cvrlist.cvrs | join "," }}
apiVersion: openebs.io/v1alpha1
---
# runTask to delete cstorvolume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletecstorvolumecr-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
runNamespace: {{ .TaskResult.deletelistcsv.derivedNS }}
id: deletedeletecsv
action: delete
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
objectName: {{ pluck "names" .TaskResult.deletelistcsv | first }}
---
# runTask to render output of deleted volume.
# This task only returns the name of volume that is deleted
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-output-default
spec:
meta: |
id: deleteoutput
action: output
kind: CASVolume
apiVersion: v1alpha1
task: |
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
---
`
// CstorVolumeArtifacts returns the cstor volume related artifacts
// corresponding to latest version
func CstorVolumeArtifacts() (list artifactList) {
list.Items = append(list.Items, ParseArtifactListFromMultipleYamls(cstorVolumes{})...)
return
}
type cstorVolumes struct{}
// FetchYamls returns all the yamls related to cstor volume in a string
// format
//
// NOTE:
// This is an implementation of MultiYamlFetcher
func (c cstorVolumes) FetchYamls() string {
return cstorVolumeYamls
}
| 1 | 17,757 | Not calling this is going to impact current functionality that depends on the PVC. Let us still call this and set the values to a default of 'none' (or empty) by handling the error from the 'get' operation. | openebs-maya | go |
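A rough sketch of the reviewer's suggestion — keep performing the PVC "get", and fall back to 'none'/empty defaults when it fails — is shown below for illustration only. The pvcSpec type, getPVC helper, and annotation key are invented placeholders, not the actual openebs/maya API.

```go
package main

import "fmt"

// pvcSpec is a stand-in for the real PVC object; fields are illustrative only.
type pvcSpec struct {
	Annotations map[string]string
}

// getPVC is a placeholder for the real "get" call the reviewer wants to keep.
func getPVC(name string) (*pvcSpec, error) {
	return nil, fmt.Errorf("pvc %q not found", name)
}

// resolveTargetAffinity still performs the PVC lookup, but degrades to the
// default "none" when the get fails instead of skipping the call entirely.
func resolveTargetAffinity(pvcName string) string {
	pvc, err := getPVC(pvcName)
	if err != nil || pvc == nil {
		return "none" // default used when the PVC cannot be fetched
	}
	if v, ok := pvc.Annotations["openebs.io/target-affinity"]; ok && v != "" {
		return v
	}
	return "none"
}

func main() {
	fmt.Println(resolveTargetAffinity("demo-pvc"))
}
```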
@@ -306,7 +306,9 @@ int ioctl(int fd, unsigned long request, ...)
if (gEnableIRQ && fme_irq->evtfd >= 0) {
uint64_t data = 1;
// Write to the eventfd to signal one IRQ event.
- write(fme_irq->evtfd, &data, sizeof(data));
+ if (write(fme_irq->evtfd, &data, sizeof(data)) != sizeof(data)) {
+ FPGA_ERR("IRQ write < 8 bytes");
+ }
}
retval = 0;
errno = 0; | 1 | // Copyright(c) 2017, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
/*
* Mock up driver interactions for testing
*
* Involves redefining ioctl(), open(), close(), others?
*/
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdarg.h>
#include <dirent.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <opae/types.h>
#include "common_int.h"
#include "intel-fpga.h"
#include <assert.h>
#include <stdint.h>
#include <safe_string/safe_string.h>
#define __USE_GNU
#include <dlfcn.h>
#define MAX_FD 1024
#define MAX_STRLEN 256
#define FPGA_MOCK_IOVA 0xDECAFBADDEADBEEF
#define FPGA_MOCK_NUM_UMSGS 8
#define FPGA_MOCK_DEV_PATH "/tmp"
#define MOCK_SYSFS_FPGA_CLASS_PATH "/tmp/class/fpga"
#define FPGA_FME_DEV_PREFIX "intel-fpga-fme."
#define FPGA_PORT_DEV_PREFIX "intel-fpga-port."
#define HASH_SUFFIX ".gbshash"
#undef FPGA_MSG
#define FPGA_MSG(fmt, ...) \
printf("MOCK " fmt "\n", ## __VA_ARGS__)
#undef FPGA_ERR
#define FPGA_ERR(fmt, ...) \
printf("MOCK ERROR " fmt "\n", ## __VA_ARGS__)
#undef FPGA_DBG
#ifdef LIBFPGA_DEBUG
#define FPGA_DBG(fmt, ...) \
printf("MOCK DEBUG " fmt "\n", ## __VA_ARGS__)
#else
#define FPGA_DBG(fmt, ...) {}
#endif
/* TODO: track mock devices with dynamic data structure */
static struct mock_dev {
int valid;
fpga_objtype objtype;
char pathname[MAX_STRLEN];
} mock_devs[MAX_FD] = {{0}};
static bool gEnableIRQ = false;
bool mock_enable_irq(bool enable)
{
bool res = gEnableIRQ;
gEnableIRQ = enable;
return res;
}
static bool gEnableErrInj = false;
bool mock_enable_errinj(bool enable)
{
bool res = gEnableErrInj;
gEnableErrInj = enable;
return res;
}
typedef int (*open_func)(const char *pathname, int flags);
typedef int (*open_mode_func)(const char *pathname, int flags, mode_t m);
typedef int (*close_func)(int fd);
typedef int (*ioctl_func)(int fd, unsigned long request, char *argp);
typedef DIR * (*opendir_func)(const char *name);
typedef ssize_t (*readlink_func)(const char *pathname, char *buf, size_t bufsiz);
typedef int (*__xstat_func)(int ver, const char *pathname, struct stat *buf);
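/* XOR-fold a buffer of 32-bit words into a single word; used below as a cheap
 * fingerprint for bitstream data in the mock partial-reconfiguration flow. */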
uint32_t stupid_hash(uint32_t *buf, uint32_t len_in_words) {
uint32_t i;
uint32_t hash = 0;
for (i = 0; i < len_in_words; ++i)
hash ^= buf[i];
return hash;
}
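/* Map paths under the real sysfs FPGA class directory onto the mock tree
 * under /tmp, so that tests never touch the real sysfs. */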
static char* rewrite_sysfs_path(const char* src, char* dst, int len) {
int prefix_len = strlen(SYSFS_FPGA_CLASS_PATH);
if (strncmp(SYSFS_FPGA_CLASS_PATH, src, prefix_len) == 0) {
strncpy_s(dst, len, MOCK_SYSFS_FPGA_CLASS_PATH, strlen(MOCK_SYSFS_FPGA_CLASS_PATH));
strncpy_s(dst + prefix_len, len - prefix_len, src + prefix_len, len - prefix_len);
} else {
strncpy_s(dst, len, src, len);
}
return dst;
}
int ioctl(int fd, unsigned long request, ...)
{
va_list argp;
int retval = -1;
char *err;
errno = EINVAL;
uint32_t hash;
char hashfilename[MAX_STRLEN];
va_start(argp, request);
/* check where ioctl is going */
if (fd >= MAX_FD || !mock_devs[fd].valid) {
FPGA_DBG("real ioctl() called");
dlerror(); /* clear errors */
ioctl_func real_ioctl = (ioctl_func)dlsym(RTLD_NEXT, "ioctl");
err = dlerror();
if (NULL != err){
FPGA_ERR("dlsym() failed: %s", err);
goto out_EINVAL;
}
char *arg = va_arg(argp, char *);
if (NULL != real_ioctl) {
return real_ioctl(fd, request, arg);
}
}
if (fd >= MAX_FD)
return -1;
FPGA_DBG("mock ioctl() called");
// Returns error when Error injection enabled
if (gEnableErrInj) {
goto out_EINVAL;
}
switch (mock_devs[fd].objtype) {
case FPGA_DEVICE: /* FME */
switch (request) {
case FPGA_FME_PORT_RELEASE:
FPGA_DBG("got FPGA_FME_PORT_RELEASE");
struct fpga_fme_port_release *port_release =
va_arg(argp, struct fpga_fme_port_release *);
if (!port_release) {
FPGA_MSG("port_release is NULL");
goto out_EINVAL;
}
if (port_release->argsz != sizeof(*port_release)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (port_release->flags != 0) {
FPGA_MSG("unexpected flags %u", port_release->flags);
goto out_EINVAL;
}
if (port_release->port_id != 0) {
FPGA_MSG("unexpected port ID %u", port_release->port_id);
goto out_EINVAL;
}
retval = 0;
errno = 0;
break;
case FPGA_FME_PORT_PR:
FPGA_DBG("got FPGA_FME_PORT_PR");
struct fpga_fme_port_pr *pr = va_arg(argp, struct fpga_fme_port_pr *);
if (!pr) {
FPGA_MSG("pr is NULL");
goto out_EINVAL;
}
if (pr->argsz != sizeof(*pr)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (pr->flags != 0) {
FPGA_MSG("unexpected flags %u", pr->flags);
goto out_EINVAL;
}
if (pr->port_id != 0) {
FPGA_MSG("unexpected port ID %u", pr->port_id);
goto out_EINVAL;
}
if (pr->buffer_size == 0) {
FPGA_MSG("buffer size is 0");
goto out_EINVAL;
}
if (!pr->buffer_address) {
FPGA_MSG("buffer address is NULL");
goto out_EINVAL;
}
pr->status = 0; /* return success */
/* TODO: reflect reconfiguration (change afu_id?) */
/* generate hash for bitstream data */
hash = stupid_hash((uint32_t*)pr->buffer_address, pr->buffer_size / 4);
/* write hash to file in tmp */
strncpy_s(hashfilename, MAX_STRLEN, mock_devs[fd].pathname, strlen(mock_devs[fd].pathname) + 1);
strncat_s(hashfilename, MAX_STRLEN, HASH_SUFFIX, sizeof(HASH_SUFFIX));
FILE* hashfile = fopen(hashfilename, "w");
if (hashfile) {
fwrite(&hash, sizeof(hash), 1, hashfile);
fclose(hashfile);
}
retval = 0;
errno = 0;
break;
case FPGA_FME_PORT_ASSIGN:
FPGA_DBG("got FPGA_FME_PORT_ASSIGN");
struct fpga_fme_port_assign *port_assign =
va_arg(argp, struct fpga_fme_port_assign *);
if (!port_assign) {
FPGA_MSG("port_assign is NULL");
goto out_EINVAL;
}
if (port_assign->argsz != sizeof(*port_assign)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (port_assign->flags != 0) {
FPGA_MSG("unexpected flags %u", port_assign->flags);
goto out_EINVAL;
}
if (port_assign->port_id != 0) {
FPGA_MSG("unexpected port ID %u", port_assign->port_id);
goto out_EINVAL;
}
retval = 0;
errno = 0;
break;
case FPGA_FME_GET_INFO:
FPGA_DBG("got FPGA_FME_GET_INFO");
struct fpga_fme_info *fme_info =
va_arg(argp, struct fpga_fme_info *);
if (!fme_info) {
FPGA_MSG("fme_info is NULL");
goto out_EINVAL;
}
if (fme_info->argsz != sizeof(*fme_info)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (fme_info->flags != 0) {
FPGA_MSG("unexpected flags %u", fme_info->flags);
goto out_EINVAL;
}
if (fme_info->capability != 0) {
FPGA_MSG("unexpected capability %u", fme_info->capability);
goto out_EINVAL;
}
fme_info->capability = gEnableIRQ ? FPGA_FME_CAP_ERR_IRQ : 0;
retval = 0;
errno = 0;
break;
case FPGA_FME_ERR_SET_IRQ:
FPGA_DBG("got FPGA_FME_ERR_SET_IRQ");
struct fpga_fme_err_irq_set *fme_irq =
va_arg(argp, struct fpga_fme_err_irq_set *);
if (!fme_irq) {
FPGA_MSG("fme_irq is NULL");
goto out_EINVAL;
}
if (fme_irq->argsz != sizeof(*fme_irq)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (fme_irq->flags != 0) {
FPGA_MSG("unexpected flags %u", fme_irq->flags);
goto out_EINVAL;
}
if (gEnableIRQ && fme_irq->evtfd >= 0) {
uint64_t data = 1;
// Write to the eventfd to signal one IRQ event.
write(fme_irq->evtfd, &data, sizeof(data));
}
retval = 0;
errno = 0;
break;
default:
FPGA_DBG("Unknown FME IOCTL request %lu", request);
break;
}
break;
case FPGA_ACCELERATOR: /* PORT */
switch (request) {
case FPGA_PORT_DMA_MAP:
FPGA_DBG("got FPGA_PORT_DMA_MAP");
struct fpga_port_dma_map *dma_map = va_arg(argp, struct fpga_port_dma_map *);
if (!dma_map) {
FPGA_MSG("dma_map is NULL");
goto out_EINVAL;
}
if (dma_map->argsz != sizeof(*dma_map)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (!dma_map->user_addr) {
FPGA_MSG("mapping address is NULL");
goto out_EINVAL;
}
/* TODO: check alignment */
if (dma_map->length == 0) {
FPGA_MSG("mapping size is 0");
goto out_EINVAL;
}
dma_map->iova = FPGA_MOCK_IOVA; /* return something */
retval = 0;
errno = 0;
break;
case FPGA_PORT_DMA_UNMAP:
FPGA_DBG("got FPGA_PORT_DMA_UNMAP");
struct fpga_port_dma_unmap *dma_unmap = va_arg(argp, struct fpga_port_dma_unmap *);
if (!dma_unmap) {
FPGA_MSG("dma_unmap is NULL");
goto out_EINVAL;
}
if (dma_unmap->argsz != sizeof(*dma_unmap)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (dma_unmap->iova != FPGA_MOCK_IOVA) {
FPGA_MSG("unexpected IOVA (0x%llx)", dma_unmap->iova);
goto out_EINVAL;
}
retval = 0;
errno = 0;
break;
case FPGA_PORT_RESET:
FPGA_DBG("got FPGA_PORT_RESET");
retval = 0;
break;
case FPGA_PORT_GET_REGION_INFO:
FPGA_DBG("got FPGA_PORT_GET_REGION_INFO");
struct fpga_port_region_info *rinfo = va_arg(argp, struct fpga_port_region_info *);
if (!rinfo) {
FPGA_MSG("rinfo is NULL");
goto out_EINVAL;
}
if (rinfo->argsz != sizeof(*rinfo)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (rinfo->index != 0) {
FPGA_MSG("unsupported MMIO index");
goto out_EINVAL;
}
if (rinfo->padding != 0) {
FPGA_MSG("unsupported padding");
goto out_EINVAL;
}
rinfo->flags = FPGA_REGION_READ | FPGA_REGION_WRITE | FPGA_REGION_MMAP;
rinfo->size = 0x40000;
rinfo->offset = 0;
retval = 0;
errno = 0;
break;
case FPGA_PORT_GET_INFO:
FPGA_DBG("got FPGA_PORT_GET_INFO");
struct fpga_port_info *pinfo = va_arg(argp, struct fpga_port_info *);
if (!pinfo) {
FPGA_MSG("pinfo is NULL");
goto out_EINVAL;
}
if (pinfo->argsz != sizeof(*pinfo)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
pinfo->flags = 0;
pinfo->num_regions = 1;
pinfo->num_umsgs = 8;
if (gEnableIRQ) {
pinfo->capability = FPGA_PORT_CAP_ERR_IRQ | FPGA_PORT_CAP_UAFU_IRQ;
pinfo->num_uafu_irqs = 1;
} else {
pinfo->capability = 0;
pinfo->num_uafu_irqs = 0;
}
retval = 0;
errno = 0;
break;
case FPGA_PORT_ERR_SET_IRQ:
FPGA_DBG("got FPGA_PORT_ERR_SET_IRQ");
struct fpga_port_err_irq_set *port_irq =
va_arg(argp, struct fpga_port_err_irq_set *);
if (!port_irq) {
FPGA_MSG("port_irq is NULL");
goto out_EINVAL;
}
if (port_irq->argsz != sizeof(*port_irq)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (port_irq->flags != 0) {
FPGA_MSG("unexpected flags %u", port_irq->flags);
goto out_EINVAL;
}
if (gEnableIRQ && port_irq->evtfd >= 0) {
uint64_t data = 1;
// Write to the eventfd to signal one IRQ event.
write(port_irq->evtfd, &data, sizeof(data));
}
retval = 0;
errno = 0;
break;
case FPGA_PORT_UAFU_SET_IRQ:
FPGA_DBG("got FPGA_PORT_UAFU_SET_IRQ");
struct fpga_port_uafu_irq_set *uafu_irq =
va_arg(argp, struct fpga_port_uafu_irq_set *);
if (!uafu_irq) {
FPGA_MSG("uafu_irq is NULL");
goto out_EINVAL;
}
if (uafu_irq->argsz < sizeof(*uafu_irq)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (uafu_irq->flags != 0) {
FPGA_MSG("unexpected flags %u", uafu_irq->flags);
goto out_EINVAL;
}
if (gEnableIRQ) {
uint32_t i;
uint64_t data = 1;
// Write to each eventfd to signal one IRQ event.
for (i = 0 ; i < uafu_irq->count ; ++i) {
if (uafu_irq->evtfd[i] >= 0)
write(uafu_irq->evtfd[i], &data, sizeof(data));
}
}
retval = 0;
errno = 0;
break;
case FPGA_PORT_UMSG_SET_MODE:
FPGA_DBG("got FPGA_PORT_UMSG_SET_MODE");
struct fpga_port_umsg_cfg *ucfg = va_arg(argp, struct fpga_port_umsg_cfg *);
if (!ucfg) {
FPGA_MSG("ucfg is NULL");
goto out_EINVAL;
}
if (ucfg->argsz != sizeof(*ucfg)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (ucfg->flags != 0) {
FPGA_MSG("unexpected flags %u", ucfg->flags);
goto out_EINVAL;
}
/* TODO: check hint_bitmap */
if (ucfg->hint_bitmap >> FPGA_MOCK_NUM_UMSGS) {
FPGA_MSG("invalid hint_bitmap 0x%x", ucfg->hint_bitmap);
goto out_EINVAL;
}
retval = 0;
errno = 0;
break;
case FPGA_PORT_UMSG_SET_BASE_ADDR:
FPGA_DBG("got FPGA_PORT_UMSG_SET_BASE_ADDR");
struct fpga_port_umsg_base_addr *ubase = va_arg(argp, struct fpga_port_umsg_base_addr *);
if (!ubase) {
FPGA_MSG("ubase is NULL");
goto out_EINVAL;
}
if (ubase->argsz != sizeof(*ubase)) {
FPGA_MSG("wrong structure size");
goto out_EINVAL;
}
if (ubase->flags != 0) {
FPGA_MSG("unexpected flags %u", ubase->flags);
goto out_EINVAL;
}
/* TODO: check iova */
retval = 0;
errno = 0;
break;
case FPGA_PORT_UMSG_ENABLE:
FPGA_DBG("got FPGA_PORT_UMSG_ENABLE");
retval = 0;
break;
case FPGA_PORT_UMSG_DISABLE:
FPGA_DBG("got FPGA_PORT_UMSG_DISABLE");
retval = 0;
break;
default:
FPGA_DBG("Unknown PORT IOCTL request %lu", request);
break;
}
break;
}
out:
va_end(argp);
return retval;
out_EINVAL:
retval = -1;
errno = EINVAL;
goto out;
}
struct afu_header {
uint64_t afu_dfh;
uint64_t afu_id_l;
uint64_t afu_id_h;
} __attribute__((packed));
int open(const char* pathname, int flags, ...) {
int fd;
char path[MAX_STRLEN];
char* err;
int prefix_len = strlen(FPGA_DEV_PATH);
va_list argp;
dlerror(); /* clear errors */
open_func real_open = (open_func)dlsym(RTLD_NEXT, "open");
assert(real_open);
err = dlerror();
if (err) {
FPGA_ERR("dlsym() failed: %s", err);
errno = EINVAL;
return -1;
}
FPGA_DBG("open(\"%s\", %i)", pathname, flags);
if (strncmp(FPGA_DEV_PATH "/" FPGA_FME_DEV_PREFIX, pathname, prefix_len + strlen(FPGA_FME_DEV_PREFIX) - 2) == 0 ) {
FPGA_DBG("accessing FME device");
/* rewrite path */
strncpy_s(path, sizeof(path), FPGA_MOCK_DEV_PATH, prefix_len);
strncpy_s(path + prefix_len, sizeof(path) - prefix_len,
pathname + prefix_len, (MAX_STRLEN - 1 - prefix_len));
/* call real open */
FPGA_DBG("-> open(\"%s\", %i)", path, flags);
fd = real_open(path, flags);
if (fd < 0)
return fd;
/* store info */
strncpy_s(mock_devs[fd].pathname, sizeof(mock_devs[fd].pathname), path, MAX_STRLEN - 1);
mock_devs[fd].objtype = FPGA_DEVICE;
mock_devs[fd].valid = 1;
} else if (strncmp(FPGA_DEV_PATH "/" FPGA_PORT_DEV_PREFIX, pathname, prefix_len + 1 + strlen(FPGA_PORT_DEV_PREFIX)) == 0 ) {
struct afu_header header;
ssize_t sz;
ssize_t res;
FPGA_DBG("accessing PORT device");
/* rewrite path */
strncpy_s(path, sizeof(path), FPGA_MOCK_DEV_PATH, prefix_len);
strncpy_s(path + prefix_len, sizeof(path) - prefix_len, pathname + prefix_len, MAX_STRLEN - prefix_len);
/* call real open */
FPGA_DBG("-> open(\"%s\", %i)", path, flags);
fd = real_open(path, flags);
if (fd < 0)
return fd;
/* store info */
strncpy_s(mock_devs[fd].pathname, sizeof(mock_devs[fd].pathname), path, MAX_STRLEN - 1);
mock_devs[fd].objtype = FPGA_ACCELERATOR;
mock_devs[fd].valid = 1;
/* Write the AFU header to offset 0, where the mmap call for CSR space 0 will point. */
header.afu_dfh = 0x1000000000001070ULL;
header.afu_id_l = 0xf89e433683f9040bULL;
header.afu_id_h = 0xd8424dc4a4a3c413ULL;
lseek(fd, 0, SEEK_SET);
sz = 0;
do
{
res = write(fd, (char *)&header + sz, sizeof(header) - sz);
if (res < 0)
break;
sz += res;
} while((size_t)sz < sizeof(header));
lseek(fd, 0, SEEK_SET);
} else if (strncmp(SYSFS_FPGA_CLASS_PATH, pathname, strlen(SYSFS_FPGA_CLASS_PATH)) == 0 ) {
/* rewrite path */
rewrite_sysfs_path(pathname, path, MAX_STRLEN);
/* call real open */
FPGA_DBG("-> open(\"%s\", %i)", path, flags);
fd = real_open(path, flags);
} else {
FPGA_DBG("-> open(\"%s\", %i)", pathname, flags);
if (flags & O_CREAT){
va_start(argp, flags);
mode_t arg = va_arg(argp, mode_t);
fd = ((open_mode_func)real_open)(pathname, flags, arg);
va_end(argp);
}else{
fd = real_open(pathname, flags);
}
}
return fd;
}
int close(int fd)
{
int retval;
char *err;
dlerror(); /* clear errors */
close_func real_close = (close_func)dlsym(RTLD_NEXT, "close");
assert(real_close);
err = dlerror();
if (err) {
FPGA_ERR("dlsym() failed: %s", err);
errno = EINVAL;
return -1;
}
FPGA_DBG("close(%i)", fd);
retval = real_close(fd);
if (retval >= 0 && fd < MAX_FD && mock_devs[fd].valid) {
/* drop mock device */
mock_devs[fd].valid = 0;
}
return retval;
}
DIR *opendir(const char *name)
{
char *err;
char s[MAX_STRLEN];
dlerror(); /* clear errors */
opendir_func real_opendir = (opendir_func)dlsym(RTLD_NEXT, "opendir");
assert(real_opendir);
err = dlerror();
if (err) {
FPGA_ERR("dlsym() failed: %s", err);
errno = EINVAL;
return NULL;
}
FPGA_DBG("opendir(%s)", name);
rewrite_sysfs_path(name, s, MAX_STRLEN);
FPGA_DBG("-> opendir(%s)", s);
return real_opendir(s);
}
ssize_t readlink(const char *pathname, char *buf, size_t bufsiz)
{
char *err;
char s[MAX_STRLEN];
dlerror(); /* clear errors */
readlink_func real_readlink = (readlink_func)dlsym(RTLD_NEXT, "readlink");
assert(real_readlink);
err = dlerror();
if (err) {
FPGA_ERR("dlsym() failed: %s", err);
errno = EINVAL;
return -1;
}
FPGA_DBG("readlink(%s)", pathname);
rewrite_sysfs_path(pathname, s, MAX_STRLEN);
FPGA_DBG("-> readlink(%s)", s);
return real_readlink(s, buf, bufsiz);
}
/* stat() redirects to __xstat() */
int __xstat(int ver, const char *pathname, struct stat *buf)
{
char *err;
char s[MAX_STRLEN];
dlerror(); /* clear errors */
__xstat_func real_xstat = (__xstat_func)dlsym(RTLD_NEXT, "__xstat");
assert(real_xstat);
err = dlerror();
if (err) {
FPGA_ERR("dlsym() failed: %s", err);
errno = EINVAL;
return -1;
}
FPGA_DBG("stat(%s)", pathname);
rewrite_sysfs_path(pathname, s, MAX_STRLEN);
FPGA_DBG("-> stat(%s)", s);
return real_xstat(ver, s, buf);
}
/* lstat() redirects to __lxstat() */
int __lxstat(int ver, const char *pathname, struct stat *buf)
{
char *err;
char s[MAX_STRLEN];
dlerror(); /* clear errors */
__xstat_func real_lxstat = (__xstat_func)dlsym(RTLD_NEXT, "__lxstat");
assert(real_lxstat);
err = dlerror();
if (err) {
FPGA_ERR("dlsym() failed: %s", err);
errno = EINVAL;
return -1;
}
FPGA_DBG("lstat(%s)", pathname);
rewrite_sysfs_path(pathname, s, MAX_STRLEN);
FPGA_DBG("-> lstat(%s)", s);
return real_lxstat(ver, s, buf);
}
fpga_result fpgaReconfigureSlot(fpga_handle fpga,
uint32_t slot,
const uint8_t *bitstream,
size_t bitstream_len,
int flags)
{
(void)flags; /* silence unused-parameter warning */
if (!fpga ||
(((struct _fpga_handle *)fpga)->magic != FPGA_HANDLE_MAGIC) ||
(((struct _fpga_handle *)fpga)->fddev < 0)) {
FPGA_MSG("Invalid handle object");
return FPGA_INVALID_PARAM;
}
if (slot > 2) {
FPGA_MSG("Invalid slot: %d", slot);
return FPGA_INVALID_PARAM;
}
if (!bitstream) {
FPGA_MSG("NULL bitstream pointer");
return FPGA_INVALID_PARAM;
}
if (!bitstream_len) {
FPGA_MSG("bitstream length is 0");
return FPGA_INVALID_PARAM;
}
uint32_t hash = stupid_hash((uint32_t*)bitstream, bitstream_len / 4);
char* hashfilename = "/tmp/intel-fpga-fme.0.gbshash";
FILE* hashfile = fopen(hashfilename, "w");
if (hashfile) {
fwrite(&hash, sizeof(hash), 1, hashfile);
fclose(hashfile);
}
return FPGA_OK;
}
| 1 | 15,913 | `!=` is not the same as `<` | OPAE-opae-sdk | c |
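The review message above refers to the patch at the top of this row, where the added check tests `write(...) != sizeof(data)` while the error text says "IRQ write < 8 bytes". The stand-alone sketch below illustrates the distinction the reviewer is pointing at — a negative return (error) versus a short write; the signal_irq helper and its messages are assumptions for illustration, not OPAE code.

```c
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

/* Illustrative helper only: signal one eventfd-based IRQ and report failures
 * precisely. write() returns a signed ssize_t, so "!= sizeof(data)" also
 * fires on -1 (an error, with errno set), not only on a short write. */
static int signal_irq(int evtfd)
{
	uint64_t data = 1;
	ssize_t res = write(evtfd, &data, sizeof(data));

	if (res < 0) {
		perror("IRQ eventfd write failed");
		return -1;
	}
	if ((size_t)res < sizeof(data)) {
		fprintf(stderr, "IRQ write < %zu bytes (wrote %zd)\n",
		        sizeof(data), res);
		return -1;
	}
	return 0;
}

int main(void)
{
	int fd = eventfd(0, 0);

	if (fd < 0) {
		perror("eventfd");
		return 1;
	}
	return signal_irq(fd) == 0 ? 0 : 1;
}
```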
@@ -0,0 +1,8 @@
+from .transformer import (FFN, MultiheadAttention, Transformer,
+ TransformerDecoder, TransformerDecoderLayer,
+ TransformerEncoder, TransformerEncoderLayer)
+
+__all__ = [
+ 'FFN', 'MultiheadAttention', 'Transformer', 'TransformerDecoder',
+ 'TransformerDecoderLayer', 'TransformerEncoder', 'TransformerEncoderLayer'
+] | 1 | 1 | 21,481 | No need to create a dir for transformer. Simply move transformer.py into mmdet/models/utils/ | open-mmlab-mmdetection | py |
|
@@ -531,6 +531,10 @@ public class MainActivity extends CastEnabledActivity {
bottomSheetCallback.onSlide(null, 1.0f);
} else if (Intent.ACTION_VIEW.equals(intent.getAction())) {
handleDeeplink(intent.getData());
+ } else if (Intent.ACTION_CREATE_SHORTCUT.equals(intent.getAction())) {
+ intent = new Intent(this, SelectSubscriptionActivity.class);
+ intent.setAction(Intent.ACTION_CREATE_SHORTCUT);
+ startActivity(intent);
}
// to avoid handling the intent twice when the configuration changes
setIntent(new Intent(MainActivity.this, MainActivity.class)); | 1 | package de.danoeh.antennapod.activity;
import android.annotation.TargetApi;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.res.Configuration;
import android.media.AudioManager;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.os.Handler;
import android.os.Looper;
import android.util.DisplayMetrics;
import android.util.Log;
import android.view.KeyEvent;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.EditText;
import android.widget.FrameLayout;
import android.widget.Toast;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.appcompat.app.ActionBarDrawerToggle;
import androidx.appcompat.app.AlertDialog;
import androidx.appcompat.widget.Toolbar;
import androidx.core.content.ContextCompat;
import androidx.core.view.ViewCompat;
import androidx.drawerlayout.widget.DrawerLayout;
import androidx.fragment.app.Fragment;
import androidx.fragment.app.FragmentManager;
import androidx.fragment.app.FragmentTransaction;
import androidx.recyclerview.widget.RecyclerView;
import com.bumptech.glide.Glide;
import com.google.android.material.bottomsheet.BottomSheetBehavior;
import com.google.android.material.snackbar.Snackbar;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.Validate;
import org.greenrobot.eventbus.EventBus;
import org.greenrobot.eventbus.Subscribe;
import org.greenrobot.eventbus.ThreadMode;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.core.event.MessageEvent;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.receiver.MediaButtonReceiver;
import de.danoeh.antennapod.core.service.playback.PlaybackService;
import de.danoeh.antennapod.core.util.StorageUtils;
import de.danoeh.antennapod.core.util.download.AutoUpdateManager;
import de.danoeh.antennapod.dialog.RatingDialog;
import de.danoeh.antennapod.fragment.AddFeedFragment;
import de.danoeh.antennapod.fragment.AudioPlayerFragment;
import de.danoeh.antennapod.fragment.DownloadsFragment;
import de.danoeh.antennapod.fragment.EpisodesFragment;
import de.danoeh.antennapod.fragment.FeedItemlistFragment;
import de.danoeh.antennapod.fragment.NavDrawerFragment;
import de.danoeh.antennapod.fragment.PlaybackHistoryFragment;
import de.danoeh.antennapod.fragment.QueueFragment;
import de.danoeh.antennapod.fragment.SearchFragment;
import de.danoeh.antennapod.fragment.SubscriptionFragment;
import de.danoeh.antennapod.fragment.TransitionEffect;
import de.danoeh.antennapod.preferences.PreferenceUpgrader;
import de.danoeh.antennapod.ui.appstartintent.MainActivityStarter;
import de.danoeh.antennapod.ui.common.ThemeUtils;
import de.danoeh.antennapod.view.LockableBottomSheetBehavior;
/**
* The activity that is shown when the user launches the app.
*/
public class MainActivity extends CastEnabledActivity {
private static final String TAG = "MainActivity";
public static final String MAIN_FRAGMENT_TAG = "main";
public static final String PREF_NAME = "MainActivityPrefs";
public static final String PREF_IS_FIRST_LAUNCH = "prefMainActivityIsFirstLaunch";
public static final String EXTRA_FRAGMENT_TAG = "fragment_tag";
public static final String EXTRA_FRAGMENT_ARGS = "fragment_args";
public static final String EXTRA_FEED_ID = "fragment_feed_id";
public static final String EXTRA_REFRESH_ON_START = "refresh_on_start";
public static final String EXTRA_STARTED_FROM_SEARCH = "started_from_search";
public static final String KEY_GENERATED_VIEW_ID = "generated_view_id";
private @Nullable DrawerLayout drawerLayout;
private @Nullable ActionBarDrawerToggle drawerToggle;
private View navDrawer;
private LockableBottomSheetBehavior sheetBehavior;
private long lastBackButtonPressTime = 0;
private RecyclerView.RecycledViewPool recycledViewPool = new RecyclerView.RecycledViewPool();
private int lastTheme = 0;
@NonNull
public static Intent getIntentToOpenFeed(@NonNull Context context, long feedId) {
Intent intent = new Intent(context.getApplicationContext(), MainActivity.class);
intent.putExtra(MainActivity.EXTRA_FEED_ID, feedId);
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
return intent;
}
@Override
public void onCreate(Bundle savedInstanceState) {
lastTheme = UserPreferences.getNoTitleTheme();
setTheme(lastTheme);
if (savedInstanceState != null) {
ensureGeneratedViewIdGreaterThan(savedInstanceState.getInt(KEY_GENERATED_VIEW_ID, 0));
}
super.onCreate(savedInstanceState);
StorageUtils.checkStorageAvailability(this);
setContentView(R.layout.main);
recycledViewPool.setMaxRecycledViews(R.id.view_type_episode_item, 25);
drawerLayout = findViewById(R.id.drawer_layout);
navDrawer = findViewById(R.id.navDrawerFragment);
setNavDrawerSize();
final FragmentManager fm = getSupportFragmentManager();
if (fm.findFragmentByTag(MAIN_FRAGMENT_TAG) == null) {
String lastFragment = NavDrawerFragment.getLastNavFragment(this);
if (ArrayUtils.contains(NavDrawerFragment.NAV_DRAWER_TAGS, lastFragment)) {
loadFragment(lastFragment, null);
} else {
try {
loadFeedFragmentById(Integer.parseInt(lastFragment), null);
} catch (NumberFormatException e) {
// it's not a number, this happens if we removed
// a label from the NAV_DRAWER_TAGS
// give them a nice default...
loadFragment(QueueFragment.TAG, null);
}
}
}
FragmentTransaction transaction = fm.beginTransaction();
NavDrawerFragment navDrawerFragment = new NavDrawerFragment();
transaction.replace(R.id.navDrawerFragment, navDrawerFragment, NavDrawerFragment.TAG);
AudioPlayerFragment audioPlayerFragment = new AudioPlayerFragment();
transaction.replace(R.id.audioplayerFragment, audioPlayerFragment, AudioPlayerFragment.TAG);
transaction.commit();
checkFirstLaunch();
PreferenceUpgrader.checkUpgrades(this);
View bottomSheet = findViewById(R.id.audioplayerFragment);
sheetBehavior = (LockableBottomSheetBehavior) BottomSheetBehavior.from(bottomSheet);
sheetBehavior.setPeekHeight((int) getResources().getDimension(R.dimen.external_player_height));
sheetBehavior.setHideable(false);
sheetBehavior.setBottomSheetCallback(bottomSheetCallback);
}
/**
* ViewCompat.generateViewId stores the current ID in a static variable.
* When the process is killed, the variable gets reset.
* This makes sure that we do not get ID collisions
* and therefore errors when trying to restore state from another view.
*/
@SuppressWarnings("StatementWithEmptyBody")
private void ensureGeneratedViewIdGreaterThan(int minimum) {
while (ViewCompat.generateViewId() <= minimum) {
// Generate new IDs
}
}
@Override
protected void onSaveInstanceState(@NonNull Bundle outState) {
super.onSaveInstanceState(outState);
outState.putInt(KEY_GENERATED_VIEW_ID, ViewCompat.generateViewId());
}
private final BottomSheetBehavior.BottomSheetCallback bottomSheetCallback =
new BottomSheetBehavior.BottomSheetCallback() {
@Override
public void onStateChanged(@NonNull View view, int state) {
if (state == BottomSheetBehavior.STATE_COLLAPSED) {
onSlide(view, 0.0f);
} else if (state == BottomSheetBehavior.STATE_EXPANDED) {
onSlide(view, 1.0f);
}
}
@Override
public void onSlide(@NonNull View view, float slideOffset) {
AudioPlayerFragment audioPlayer = (AudioPlayerFragment) getSupportFragmentManager()
.findFragmentByTag(AudioPlayerFragment.TAG);
if (audioPlayer == null) {
return;
}
if (slideOffset == 0.0f) { //STATE_COLLAPSED
audioPlayer.scrollToPage(AudioPlayerFragment.POS_COVER);
}
float condensedSlideOffset = Math.max(0.0f, Math.min(0.2f, slideOffset - 0.2f)) / 0.2f;
audioPlayer.getExternalPlayerHolder().setAlpha(1 - condensedSlideOffset);
audioPlayer.getExternalPlayerHolder().setVisibility(
condensedSlideOffset > 0.99f ? View.GONE : View.VISIBLE);
}
};
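/**
 * Hooks the given toolbar up to the navigation drawer. On phone layouts this
 * installs an ActionBarDrawerToggle (hamburger or up arrow); on tablet layouts,
 * which have no drawer, it shows either no icon or an up arrow that pops the
 * back stack.
 */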
public void setupToolbarToggle(@NonNull Toolbar toolbar, boolean displayUpArrow) {
if (drawerLayout != null) { // Tablet layout does not have a drawer
if (drawerToggle != null) {
drawerLayout.removeDrawerListener(drawerToggle);
}
drawerToggle = new ActionBarDrawerToggle(this, drawerLayout, toolbar,
R.string.drawer_open, R.string.drawer_close);
drawerLayout.addDrawerListener(drawerToggle);
drawerToggle.syncState();
drawerToggle.setDrawerIndicatorEnabled(!displayUpArrow);
drawerToggle.setToolbarNavigationClickListener(v -> getSupportFragmentManager().popBackStack());
} else if (!displayUpArrow) {
toolbar.setNavigationIcon(null);
} else {
toolbar.setNavigationIcon(ThemeUtils.getDrawableFromAttr(this, R.attr.homeAsUpIndicator));
toolbar.setNavigationOnClickListener(v -> getSupportFragmentManager().popBackStack());
}
}
private void checkFirstLaunch() {
SharedPreferences prefs = getSharedPreferences(PREF_NAME, MODE_PRIVATE);
if (prefs.getBoolean(PREF_IS_FIRST_LAUNCH, true)) {
loadFragment(AddFeedFragment.TAG, null);
new Handler(Looper.getMainLooper()).postDelayed(() -> {
if (drawerLayout != null) { // Tablet layout does not have a drawer
drawerLayout.openDrawer(navDrawer);
}
}, 1500);
// for backward compatibility, we only change defaults for fresh installs
UserPreferences.setUpdateInterval(12);
SharedPreferences.Editor edit = prefs.edit();
edit.putBoolean(PREF_IS_FIRST_LAUNCH, false);
edit.apply();
}
}
public boolean isDrawerOpen() {
return drawerLayout != null && navDrawer != null && drawerLayout.isDrawerOpen(navDrawer);
}
public LockableBottomSheetBehavior getBottomSheet() {
return sheetBehavior;
}
public void setPlayerVisible(boolean visible) {
getBottomSheet().setLocked(!visible);
FrameLayout mainView = findViewById(R.id.main_view);
ViewGroup.MarginLayoutParams params = (ViewGroup.MarginLayoutParams) mainView.getLayoutParams();
params.setMargins(0, 0, 0, visible ? (int) getResources().getDimension(R.dimen.external_player_height) : 0);
mainView.setLayoutParams(params);
findViewById(R.id.audioplayerFragment).setVisibility(visible ? View.VISIBLE : View.GONE);
}
public RecyclerView.RecycledViewPool getRecycledViewPool() {
return recycledViewPool;
}
public void loadFragment(String tag, Bundle args) {
Log.d(TAG, "loadFragment(tag: " + tag + ", args: " + args + ")");
Fragment fragment;
switch (tag) {
case QueueFragment.TAG:
fragment = new QueueFragment();
break;
case EpisodesFragment.TAG:
fragment = new EpisodesFragment();
break;
case DownloadsFragment.TAG:
fragment = new DownloadsFragment();
break;
case PlaybackHistoryFragment.TAG:
fragment = new PlaybackHistoryFragment();
break;
case AddFeedFragment.TAG:
fragment = new AddFeedFragment();
break;
case SubscriptionFragment.TAG:
fragment = new SubscriptionFragment();
break;
default:
// default to the queue
fragment = new QueueFragment();
tag = QueueFragment.TAG;
args = null;
break;
}
if (args != null) {
fragment.setArguments(args);
}
NavDrawerFragment.saveLastNavFragment(this, tag);
loadFragment(fragment);
}
public void loadFeedFragmentById(long feedId, Bundle args) {
Fragment fragment = FeedItemlistFragment.newInstance(feedId);
if (args != null) {
fragment.setArguments(args);
}
NavDrawerFragment.saveLastNavFragment(this, String.valueOf(feedId));
loadFragment(fragment);
}
private void loadFragment(Fragment fragment) {
FragmentManager fragmentManager = getSupportFragmentManager();
// clear back stack
for (int i = 0; i < fragmentManager.getBackStackEntryCount(); i++) {
fragmentManager.popBackStack();
}
FragmentTransaction t = fragmentManager.beginTransaction();
t.replace(R.id.main_view, fragment, MAIN_FRAGMENT_TAG);
fragmentManager.popBackStack();
// TODO: we have to allow state loss here
// since this function can get called from an AsyncTask which
// could be finishing after our app has already committed state
// and is about to get shutdown. What we *should* do is
// not commit anything in an AsyncTask, but that's a bigger
// change than we want now.
t.commitAllowingStateLoss();
if (drawerLayout != null) { // Tablet layout does not have a drawer
drawerLayout.closeDrawer(navDrawer);
}
}
public void loadChildFragment(Fragment fragment, TransitionEffect transition) {
Validate.notNull(fragment);
FragmentTransaction transaction = getSupportFragmentManager().beginTransaction();
switch (transition) {
case FADE:
transaction.setCustomAnimations(R.anim.fade_in, R.anim.fade_out);
break;
case SLIDE:
transaction.setCustomAnimations(
R.anim.slide_right_in,
R.anim.slide_left_out,
R.anim.slide_left_in,
R.anim.slide_right_out);
break;
}
transaction
.hide(getSupportFragmentManager().findFragmentByTag(MAIN_FRAGMENT_TAG))
.add(R.id.main_view, fragment, MAIN_FRAGMENT_TAG)
.addToBackStack(null)
.commit();
}
public void loadChildFragment(Fragment fragment) {
loadChildFragment(fragment, TransitionEffect.NONE);
}
@Override
protected void onPostCreate(Bundle savedInstanceState) {
super.onPostCreate(savedInstanceState);
if (drawerToggle != null) { // Tablet layout does not have a drawer
drawerToggle.syncState();
}
}
@Override
public void onConfigurationChanged(Configuration newConfig) {
super.onConfigurationChanged(newConfig);
if (drawerToggle != null) { // Tablet layout does not have a drawer
drawerToggle.onConfigurationChanged(newConfig);
}
setNavDrawerSize();
}
private void setNavDrawerSize() {
if (drawerToggle == null) { // Tablet layout does not have a drawer
return;
}
float screenPercent = getResources().getInteger(R.integer.nav_drawer_screen_size_percent) * 0.01f;
int width = (int) (getScreenWidth() * screenPercent);
int maxWidth = (int) getResources().getDimension(R.dimen.nav_drawer_max_screen_size);
navDrawer.getLayoutParams().width = Math.min(width, maxWidth);
}
private int getScreenWidth() {
DisplayMetrics displayMetrics = new DisplayMetrics();
getWindowManager().getDefaultDisplay().getMetrics(displayMetrics);
return displayMetrics.widthPixels;
}
@Override
protected void onRestoreInstanceState(Bundle savedInstanceState) {
super.onRestoreInstanceState(savedInstanceState);
if (getBottomSheet().getState() == BottomSheetBehavior.STATE_EXPANDED) {
bottomSheetCallback.onSlide(null, 1.0f);
}
}
@Override
public void onStart() {
super.onStart();
EventBus.getDefault().register(this);
RatingDialog.init(this);
if (lastTheme != UserPreferences.getNoTitleTheme()) {
finish();
startActivity(new Intent(this, MainActivity.class));
}
}
@Override
protected void onResume() {
super.onResume();
StorageUtils.checkStorageAvailability(this);
handleNavIntent();
RatingDialog.check();
}
@Override
protected void onStop() {
super.onStop();
EventBus.getDefault().unregister(this);
}
@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH)
@Override
public void onTrimMemory(int level) {
super.onTrimMemory(level);
Glide.get(this).trimMemory(level);
}
@Override
public void onLowMemory() {
super.onLowMemory();
Glide.get(this).clearMemory();
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (drawerToggle != null && drawerToggle.onOptionsItemSelected(item)) { // Tablet layout does not have a drawer
return true;
} else if (item.getItemId() == android.R.id.home) {
if (getSupportFragmentManager().getBackStackEntryCount() > 0) {
getSupportFragmentManager().popBackStack();
}
return true;
} else {
return super.onOptionsItemSelected(item);
}
}
@Override
public void onBackPressed() {
if (isDrawerOpen()) {
drawerLayout.closeDrawer(navDrawer);
} else if (sheetBehavior.getState() == BottomSheetBehavior.STATE_EXPANDED) {
sheetBehavior.setState(BottomSheetBehavior.STATE_COLLAPSED);
} else if (getSupportFragmentManager().getBackStackEntryCount() != 0) {
super.onBackPressed();
} else {
switch (UserPreferences.getBackButtonBehavior()) {
case OPEN_DRAWER:
if (drawerLayout != null) { // Tablet layout does not have drawer
drawerLayout.openDrawer(navDrawer);
}
break;
case SHOW_PROMPT:
new AlertDialog.Builder(this)
.setMessage(R.string.close_prompt)
.setPositiveButton(R.string.yes, (dialogInterface, i) -> MainActivity.super.onBackPressed())
.setNegativeButton(R.string.no, null)
.setCancelable(false)
.show();
break;
case DOUBLE_TAP:
if (lastBackButtonPressTime < System.currentTimeMillis() - 2000) {
Toast.makeText(this, R.string.double_tap_toast, Toast.LENGTH_SHORT).show();
lastBackButtonPressTime = System.currentTimeMillis();
} else {
super.onBackPressed();
}
break;
case GO_TO_PAGE:
if (NavDrawerFragment.getLastNavFragment(this).equals(UserPreferences.getBackButtonGoToPage())) {
super.onBackPressed();
} else {
loadFragment(UserPreferences.getBackButtonGoToPage(), null);
}
break;
default: super.onBackPressed();
}
}
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onEventMainThread(MessageEvent event) {
Log.d(TAG, "onEvent(" + event + ")");
Snackbar snackbar = showSnackbarAbovePlayer(event.message, Snackbar.LENGTH_SHORT);
if (event.action != null) {
snackbar.setAction(getString(R.string.undo), v -> event.action.run());
}
}
private void handleNavIntent() {
Intent intent = getIntent();
if (intent.hasExtra(EXTRA_FEED_ID) || intent.hasExtra(EXTRA_FRAGMENT_TAG) || intent.hasExtra(EXTRA_REFRESH_ON_START)) {
Log.d(TAG, "handleNavIntent()");
String tag = intent.getStringExtra(EXTRA_FRAGMENT_TAG);
Bundle args = intent.getBundleExtra(EXTRA_FRAGMENT_ARGS);
boolean refreshOnStart = intent.getBooleanExtra(EXTRA_REFRESH_ON_START, false);
if (refreshOnStart) {
AutoUpdateManager.runImmediate(this);
}
long feedId = intent.getLongExtra(EXTRA_FEED_ID, 0);
if (tag != null) {
loadFragment(tag, args);
} else if (feedId > 0) {
if (intent.getBooleanExtra(EXTRA_STARTED_FROM_SEARCH, false)) {
loadChildFragment(FeedItemlistFragment.newInstance(feedId));
} else {
loadFeedFragmentById(feedId, args);
}
}
sheetBehavior.setState(BottomSheetBehavior.STATE_COLLAPSED);
} else if (intent.getBooleanExtra(MainActivityStarter.EXTRA_OPEN_PLAYER, false)) {
sheetBehavior.setState(BottomSheetBehavior.STATE_EXPANDED);
bottomSheetCallback.onSlide(null, 1.0f);
} else if (Intent.ACTION_VIEW.equals(intent.getAction())) {
handleDeeplink(intent.getData());
}
// to avoid handling the intent twice when the configuration changes
setIntent(new Intent(MainActivity.this, MainActivity.class));
}
@Override
protected void onNewIntent(Intent intent) {
super.onNewIntent(intent);
setIntent(intent);
handleNavIntent();
}
public Snackbar showSnackbarAbovePlayer(CharSequence text, int duration) {
Snackbar s;
if (getBottomSheet().getState() == BottomSheetBehavior.STATE_COLLAPSED) {
s = Snackbar.make(findViewById(R.id.main_view), text, duration);
if (findViewById(R.id.audioplayerFragment).getVisibility() == View.VISIBLE) {
s.setAnchorView(findViewById(R.id.audioplayerFragment));
}
} else {
s = Snackbar.make(findViewById(android.R.id.content), text, duration);
}
s.show();
return s;
}
public Snackbar showSnackbarAbovePlayer(int text, int duration) {
return showSnackbarAbovePlayer(getResources().getText(text), duration);
}
/**
* Handles the deep link incoming via App Actions.
* Performs an in-app search or opens the relevant feature of the app
* depending on the query.
*
* @param uri incoming deep link
*/
private void handleDeeplink(Uri uri) {
if (uri == null || uri.getPath() == null) {
return;
}
Log.d(TAG, "Handling deeplink: " + uri.toString());
switch (uri.getPath()) {
case "/deeplink/search":
String query = uri.getQueryParameter("query");
if (query == null) {
return;
}
this.loadChildFragment(SearchFragment.newInstance(query));
break;
case "/deeplink/main":
String feature = uri.getQueryParameter("page");
if (feature == null) {
return;
}
switch (feature) {
case "DOWNLOADS":
loadFragment(DownloadsFragment.TAG, null);
break;
case "HISTORY":
loadFragment(PlaybackHistoryFragment.TAG, null);
break;
case "EPISODES":
loadFragment(EpisodesFragment.TAG, null);
break;
case "QUEUE":
loadFragment(QueueFragment.TAG, null);
break;
case "SUBSCRIPTIONS":
loadFragment(SubscriptionFragment.TAG, null);
break;
default:
showSnackbarAbovePlayer(getString(R.string.app_action_not_found, feature),
Snackbar.LENGTH_LONG);
return;
}
break;
default:
break;
}
}
//Hardware keyboard support
@Override
public boolean onKeyUp(int keyCode, KeyEvent event) {
View currentFocus = getCurrentFocus();
if (currentFocus instanceof EditText) {
return super.onKeyUp(keyCode, event);
}
AudioManager audioManager = (AudioManager) getSystemService(AUDIO_SERVICE);
Integer customKeyCode = null;
switch (keyCode) {
case KeyEvent.KEYCODE_P:
customKeyCode = KeyEvent.KEYCODE_MEDIA_PLAY_PAUSE;
break;
case KeyEvent.KEYCODE_J: //Fallthrough
case KeyEvent.KEYCODE_A:
case KeyEvent.KEYCODE_COMMA:
customKeyCode = KeyEvent.KEYCODE_MEDIA_REWIND;
break;
case KeyEvent.KEYCODE_K: //Fallthrough
case KeyEvent.KEYCODE_D:
case KeyEvent.KEYCODE_PERIOD:
customKeyCode = KeyEvent.KEYCODE_MEDIA_FAST_FORWARD;
break;
case KeyEvent.KEYCODE_PLUS: //Fallthrough
case KeyEvent.KEYCODE_W:
audioManager.adjustStreamVolume(AudioManager.STREAM_MUSIC,
AudioManager.ADJUST_RAISE, AudioManager.FLAG_SHOW_UI);
return true;
case KeyEvent.KEYCODE_MINUS: //Fallthrough
case KeyEvent.KEYCODE_S:
audioManager.adjustStreamVolume(AudioManager.STREAM_MUSIC,
AudioManager.ADJUST_LOWER, AudioManager.FLAG_SHOW_UI);
return true;
case KeyEvent.KEYCODE_M:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
audioManager.adjustStreamVolume(AudioManager.STREAM_MUSIC,
AudioManager.ADJUST_TOGGLE_MUTE, AudioManager.FLAG_SHOW_UI);
return true;
}
break;
}
if (customKeyCode != null) {
Intent intent = new Intent(this, PlaybackService.class);
intent.putExtra(MediaButtonReceiver.EXTRA_KEYCODE, customKeyCode);
ContextCompat.startForegroundService(this, intent);
return true;
}
return super.onKeyUp(keyCode, event);
}
}
| 1 | 21,025 | Can't you just add `SelectSubscriptionActivity` directly to the manifest instead of opening `MainActivity` that then starts it? | AntennaPod-AntennaPod | java |
@@ -46,6 +46,7 @@ var (
rpmsDir = app.Flag("rpm-dir", "Directory containing built RPMs.").Required().ExistingDir()
distTag = app.Flag("dist-tag", "The distribution tag the SPEC will be built with.").Required().String()
workerTar = app.Flag("worker-tar", "Full path to worker_chroot.tar.gz. If this argument is empty, specs will be parsed in the host environment.").ExistingFile()
+ runCheck = app.Flag("run-check", "Run the check during package build").Bool()
logFile = exe.LogFileFlag(app)
logLevel = exe.LogLevelFlag(app)
) | 1 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// specreader is a tool to parse spec files into a JSON structure
package main
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"microsoft.com/pkggen/internal/buildpipeline"
"microsoft.com/pkggen/internal/directory"
"microsoft.com/pkggen/internal/file"
"microsoft.com/pkggen/internal/rpm"
"microsoft.com/pkggen/internal/safechroot"
"gopkg.in/alecthomas/kingpin.v2"
"microsoft.com/pkggen/internal/exe"
"microsoft.com/pkggen/internal/logger"
"microsoft.com/pkggen/internal/pkgjson"
)
const (
defaultWorkerCount = "10"
)
// parseResult holds the worker results from parsing a SPEC file.
type parseResult struct {
packages []*pkgjson.Package
err error
}
var (
app = kingpin.New("specreader", "A tool to parse spec dependencies into JSON")
specsDir = exe.InputDirFlag(app, "Directory to scan for SPECS")
output = exe.OutputFlag(app, "Output file to export the JSON")
workers = app.Flag("workers", "Number of concurrent goroutines to parse with").Default(defaultWorkerCount).Int()
buildDir = app.Flag("build-dir", "Directory to store temporary files while parsing.").String()
srpmsDir = app.Flag("srpm-dir", "Directory containing SRPMs.").Required().ExistingDir()
rpmsDir = app.Flag("rpm-dir", "Directory containing built RPMs.").Required().ExistingDir()
distTag = app.Flag("dist-tag", "The distribution tag the SPEC will be built with.").Required().String()
workerTar = app.Flag("worker-tar", "Full path to worker_chroot.tar.gz. If this argument is empty, specs will be parsed in the host environment.").ExistingFile()
logFile = exe.LogFileFlag(app)
logLevel = exe.LogLevelFlag(app)
)
func main() {
app.Version(exe.ToolkitVersion)
kingpin.MustParse(app.Parse(os.Args[1:]))
logger.InitBestEffort(*logFile, *logLevel)
if *workers <= 0 {
logger.Log.Panicf("Value in --workers must be greater than zero. Found %d", *workers)
}
err := parseSPECsWrapper(*buildDir, *specsDir, *rpmsDir, *srpmsDir, *distTag, *output, *workerTar, *workers)
logger.PanicOnError(err)
}
// parseSPECsWrapper wraps parseSPECs to conditionally run it inside a chroot.
// If workerTar is non-empty, parsing will occur inside a chroot, otherwise it will run on the host system.
func parseSPECsWrapper(buildDir, specsDir, rpmsDir, srpmsDir, distTag, outputFile, workerTar string, workers int) (err error) {
var (
chroot *safechroot.Chroot
packageRepo *pkgjson.PackageRepo
)
if workerTar != "" {
const leaveFilesOnDisk = false
chroot, err = createChroot(workerTar, buildDir, specsDir, srpmsDir)
if err != nil {
return
}
defer chroot.Close(leaveFilesOnDisk)
}
doParse := func() error {
var parseError error
packageRepo, parseError = parseSPECs(specsDir, rpmsDir, srpmsDir, distTag, workers)
return parseError
}
if chroot != nil {
logger.Log.Info("Parsing SPECs inside a chroot environment")
err = chroot.Run(doParse)
} else {
logger.Log.Info("Parsing SPECs in the host environment")
err = doParse()
}
if err != nil {
return
}
b, err := json.MarshalIndent(packageRepo, "", " ")
if err != nil {
logger.Log.Error("Unable to marshal package info JSON")
return
}
err = file.Write(string(b), outputFile)
if err != nil {
logger.Log.Errorf("Failed to write file (%s)", outputFile)
return
}
return
}
// createChroot creates a chroot to parse SPECs inside of.
func createChroot(workerTar, buildDir, specsDir, srpmsDir string) (chroot *safechroot.Chroot, err error) {
const (
chrootName = "specparser_chroot"
existingDir = false
leaveFilesOnDisk = false
)
// Mount the specs and srpms directories to an identical path inside the chroot.
// Since specreader saves the full paths to specs in its output that grapher will then consume,
// the pathing needs to be preserved from the host system.
var extraDirectories []string
extraMountPoints := []*safechroot.MountPoint{
safechroot.NewMountPoint(specsDir, specsDir, "", safechroot.BindMountPointFlags, ""),
safechroot.NewMountPoint(srpmsDir, srpmsDir, "", safechroot.BindMountPointFlags, ""),
}
chrootDir := filepath.Join(buildDir, chrootName)
chroot = safechroot.NewChroot(chrootDir, existingDir)
err = chroot.Initialize(workerTar, extraDirectories, extraMountPoints)
if err != nil {
return
}
// If this is not a regular build then copy in all of the SPECs since there are no bind mounts.
if !buildpipeline.IsRegularBuild() {
dirsToCopy := []string{specsDir, srpmsDir}
for _, dir := range dirsToCopy {
dirInChroot := filepath.Join(chroot.RootDir(), dir)
err = directory.CopyContents(dir, dirInChroot)
if err != nil {
closeErr := chroot.Close(leaveFilesOnDisk)
if closeErr != nil {
logger.Log.Errorf("Failed to close chroot, err: %s", err)
}
return
}
}
}
return
}
// parseSPECs will parse all specs in specsDir and return a summary of the SPECs.
func parseSPECs(specsDir, rpmsDir, srpmsDir, distTag string, workers int) (packageRepo *pkgjson.PackageRepo, err error) {
var (
packageList []*pkgjson.Package
wg sync.WaitGroup
specFiles []string
)
packageRepo = &pkgjson.PackageRepo{}
// Find the filepath for each spec in the SPECS directory.
specSearch, err := filepath.Abs(filepath.Join(specsDir, "**/*.spec"))
if err == nil {
specFiles, err = filepath.Glob(specSearch)
}
if err != nil {
logger.Log.Errorf("Failed to find *.spec files. Check that %s is the correct directory. Error: %v", specsDir, err)
return
}
results := make(chan *parseResult, len(specFiles))
requests := make(chan string, len(specFiles))
cancel := make(chan struct{})
// Start the workers now so they begin working as soon as a new job is buffered.
for i := 0; i < workers; i++ {
wg.Add(1)
go readSpecWorker(requests, results, cancel, &wg, distTag, rpmsDir, srpmsDir)
}
for _, specFile := range specFiles {
requests <- specFile
}
close(requests)
// Receive the parsed spec structures from the workers and place them into a list.
for i := 0; i < len(specFiles); i++ {
parseResult := <-results
if parseResult.err != nil {
err = parseResult.err
close(cancel)
break
}
packageList = append(packageList, parseResult.packages...)
}
logger.Log.Debug("Waiting for outstanding workers to finish")
wg.Wait()
if err != nil {
return
}
packageRepo.Repo = packageList
sortPackages(packageRepo)
return
}
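// The fan-out pattern used by parseSPECs above (buffered request/result channels, a cancel
// channel, and a WaitGroup) is easy to miss on a first read. The sketch below is an
// illustrative, stripped-down version of that same pattern; the names demoResult and
// demoWorkerPool are invented for this example and are not part of the tool.
type demoResult struct {
	value int
	err   error
}

func demoWorkerPool(jobs []int, workers int) (results []demoResult) {
	var wg sync.WaitGroup
	requests := make(chan int, len(jobs))   // buffered so the producer never blocks
	out := make(chan demoResult, len(jobs)) // buffered so workers never block
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range requests {
				out <- demoResult{value: job * 2} // stand-in for real work
			}
		}()
	}
	for _, job := range jobs {
		requests <- job
	}
	close(requests) // lets the workers finish their range loops
	wg.Wait()
	close(out)
	for result := range out {
		results = append(results, result)
	}
	return
}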
// sortPackages orders the package lists into reasonable and deterministic orders.
// Sort the main package list by "Name", "Version", "SRPM"
// Sort each nested Requires/BuildRequires by "Name", "Version"
func sortPackages(packageRepo *pkgjson.PackageRepo) {
sort.Slice(packageRepo.Repo, func(i, j int) bool {
iName := packageRepo.Repo[i].Provides.Name + packageRepo.Repo[i].Provides.Version + packageRepo.Repo[i].SrpmPath
jName := packageRepo.Repo[j].Provides.Name + packageRepo.Repo[j].Provides.Version + packageRepo.Repo[j].SrpmPath
return strings.Compare(iName, jName) < 0
})
for _, pkg := range packageRepo.Repo {
sort.Slice(pkg.Requires, func(i, j int) bool {
iName := pkg.Requires[i].Name + pkg.Requires[i].Version
jName := pkg.Requires[j].Name + pkg.Requires[j].Version
return strings.Compare(iName, jName) < 0
})
sort.Slice(pkg.BuildRequires, func(i, j int) bool {
iName := pkg.BuildRequires[i].Name + pkg.BuildRequires[i].Version
jName := pkg.BuildRequires[j].Name + pkg.BuildRequires[j].Version
return strings.Compare(iName, jName) < 0
})
}
}
// readSpecWorker is a worker goroutine that takes full filepaths to SPEC files from the requests channel and
// scrapes each into a parseResult structure sent on the results channel. Concurrency is limited by the number of
// workers started by the caller. Too many goroutines at once can deplete available filehandles.
func readSpecWorker(requests <-chan string, results chan<- *parseResult, cancel <-chan struct{}, wg *sync.WaitGroup, distTag, rpmsDir, srpmsDir string) {
const (
emptyQueryFormat = ``
querySrpm = `%{NAME}-%{VERSION}-%{RELEASE}.src.rpm`
queryProvidedPackages = `rpm %{ARCH}/%{nvra}.rpm\n[provides %{PROVIDENEVRS}\n][requires %{REQUIRENEVRS}\n][arch %{ARCH}\n]`
)
defer wg.Done()
defines := rpm.DefaultDefines()
defines[rpm.DistTagDefine] = distTag
for specfile := range requests {
select {
case <-cancel:
logger.Log.Debug("Cancellation signal received")
return
default:
}
result := &parseResult{}
providerList := []*pkgjson.Package{}
buildRequiresList := []*pkgjson.PackageVer{}
sourcedir := filepath.Dir(specfile)
// Find the SRPM associated with the SPEC.
srpmResults, err := rpm.QuerySPEC(specfile, sourcedir, querySrpm, defines, rpm.QueryHeaderArgument)
if err != nil {
result.err = err
results <- result
continue
}
srpmPath := filepath.Join(srpmsDir, srpmResults[0])
isCompatible, err := rpm.SpecExclusiveArchIsCompatible(specfile, sourcedir, defines)
if err != nil {
result.err = err
results <- result
continue
}
if !isCompatible {
logger.Log.Debugf(`Skipping (%s) since it cannot be built on current architecture.`, specfile)
results <- result
continue
}
// Find every package that the spec provides
queryResults, err := rpm.QuerySPEC(specfile, sourcedir, queryProvidedPackages, defines, rpm.QueryBuiltRPMHeadersArgument)
if err == nil && len(queryResults) != 0 {
providerList, err = parseProvides(rpmsDir, srpmPath, queryResults)
if err != nil {
result.err = err
results <- result
continue
}
}
// Query the BuildRequires fields from this spec and turn them into an array of PackageVersions
queryResults, err = rpm.QuerySPEC(specfile, sourcedir, emptyQueryFormat, defines, rpm.BuildRequiresArgument)
if err == nil && len(queryResults) != 0 {
buildRequiresList, err = parsePackageVersionList(queryResults)
if err != nil {
result.err = err
results <- result
continue
}
}
// Every package provided by a spec will have the same BuildRequires and SrpmPath
for i := range providerList {
providerList[i].SpecPath = specfile
providerList[i].SourceDir = sourcedir
providerList[i].Requires, err = condensePackageVersionArray(providerList[i].Requires, specfile)
if err != nil {
break
}
providerList[i].BuildRequires, err = condensePackageVersionArray(buildRequiresList, specfile)
if err != nil {
break
}
}
if err != nil {
result.err = err
} else {
result.packages = providerList
}
		// Submit the result to the main goroutine; the deferred wg.Done() call runs when this worker exits.
results <- result
}
}
// parseProvides parses a newline separated list of Provides, Requires, and Arch from a single spec file.
// Several Provides may be in a row, so for each Provide the parser needs to look ahead for the first line that starts
// with a Require then ingest that line and every subsequent as a Requires until it sees a line that begins with Arch.
// provides package
// requires requiresa = 1.0
// requires requiresb
// arch noarch
// The return is an array of Package structures, one for each Provides in the spec (implicit and explicit).
func parseProvides(rpmsDir, srpmPath string, list []string) (providerlist []*pkgjson.Package, err error) {
var (
reqlist []*pkgjson.PackageVer
packagearch string
rpmPath string
listEntry []string
sublistEntry []string
)
const (
tag = iota
value = iota
)
listEntry = strings.SplitN(list[0], " ", 2)
err = minSliceLength(listEntry, 2)
if err != nil {
return
}
if listEntry[tag] != "rpm" {
err = fmt.Errorf("first element returned by rpmspec was not an rpm tag: %v", list)
return
}
rpmPath = filepath.Join(rpmsDir, listEntry[value])
logger.Log.Trace(list)
for i := range list {
listEntry = strings.SplitN(list[i], " ", 2)
err = minSliceLength(listEntry, 1)
if err != nil {
return
}
if listEntry[tag] == "rpm" {
logger.Log.Trace("rpm ", listEntry[value])
rpmPath = filepath.Join(rpmsDir, listEntry[value])
} else if listEntry[tag] == "provides" {
logger.Log.Trace("provides ", listEntry[value])
for _, v := range list[i:] {
sublistEntry = strings.SplitN(v, " ", 2)
err = minSliceLength(sublistEntry, 2)
if err != nil {
return
}
if sublistEntry[tag] == "requires" {
logger.Log.Trace(" requires ", sublistEntry[value])
var requirePkgVers []*pkgjson.PackageVer
requirePkgVers, err = parsePackageVersions(sublistEntry[value])
if err != nil {
return
}
filteredRequirePkgVers := filterOutDynamicDependencies(requirePkgVers)
reqlist = append(reqlist, filteredRequirePkgVers...)
} else if sublistEntry[tag] == "arch" {
logger.Log.Trace(" arch ", sublistEntry[value])
packagearch = sublistEntry[value]
break
}
}
var newProviderVer []*pkgjson.PackageVer
newProviderVer, err = parsePackageVersions(listEntry[value])
if err != nil {
return
}
providerPkgVer := &pkgjson.Package{
Provides: newProviderVer[0],
SrpmPath: srpmPath,
RpmPath: rpmPath,
Architecture: packagearch,
Requires: reqlist,
}
providerlist = append(providerlist, providerPkgVer)
reqlist = nil
}
}
logger.Log.Tracef("Provider: %+v", providerlist)
return
}
// parsePackageVersions takes a package name and splits it into a set of PackageVer structures.
// Normally a list of length 1 is returned, however parsePackageVersions is also responsible for
// identifying if the package name is an "or" condition and returning all options.
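// For example (values are illustrative, not taken from a real SPEC):
//	"glibc >= 2.28" -> [{Name: "glibc", Condition: ">=", Version: "2.28"}]
//	"(foo or bar)"  -> [{Name: "foo"}, {Name: "bar"}]
//	"bash"          -> [{Name: "bash"}]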
func parsePackageVersions(packagename string) (newpkgs []*pkgjson.PackageVer, err error) {
const (
NameField = iota
ConditionField = iota
VersionField = iota
)
packageSplit := strings.Split(packagename, " ")
err = minSliceLength(packageSplit, 1)
if err != nil {
return
}
	// If the first character of the packagename is "(" then it's an "or" condition
if packagename[0] == '(' {
return parseOrCondition(packagename)
}
newpkg := &pkgjson.PackageVer{Name: packageSplit[NameField]}
if len(packageSplit) == 1 {
// Nothing to do, no condition or version was found.
} else if packageSplit[ConditionField] != "or" {
newpkg.Condition = packageSplit[ConditionField]
newpkg.Version = packageSplit[VersionField]
} else {
// Replace the name with the first name that appears in (foo or bar)
substr := packageSplit[NameField][1:]
newpkg.Name = substr
}
newpkgs = append(newpkgs, newpkg)
return
}
// parsePackageVersionList takes the output from rpmspec --buildrequires
// and parses it into an array of PackageVersion structures
func parsePackageVersionList(pkgList []string) (pkgVerList []*pkgjson.PackageVer, err error) {
for _, pkgListEntry := range pkgList {
var parsedPkgVers []*pkgjson.PackageVer
parsedPkgVers, err = parsePackageVersions(pkgListEntry)
if err != nil {
return
}
pkgVerList = append(pkgVerList, parsedPkgVers...)
}
return
}
// condensePackageVersionArray deduplicates entries in an array of Package Versions
// and represents double conditionals in a single PackageVersion structure.
// If a non-blank package version is specified more than twice in a SPEC then return an error.
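// For example (illustrative), the pair "pkg >= 1.0" and "pkg < 2.0" is condensed into a single
// PackageVer carrying Version/Condition and SVersion/SCondition, while a third, different
// version constraint on "pkg" in the same SPEC results in an error.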
func condensePackageVersionArray(packagelist []*pkgjson.PackageVer, specfile string) (processedPkgList []*pkgjson.PackageVer, err error) {
for _, pkg := range packagelist {
nameMatch := false
for i, processedPkg := range processedPkgList {
if pkg.Name == processedPkg.Name {
nameMatch = true
if processedPkg.Version == "" {
processedPkgList[i].Version = pkg.Version
processedPkgList[i].Condition = pkg.Condition
break
} else if processedPkg.SVersion == "" {
processedPkgList[i].SVersion = pkg.Version
processedPkgList[i].SCondition = pkg.Condition
break
} else if processedPkg.Version == processedPkg.SVersion {
processedPkgList[i].Version = pkg.Version
processedPkgList[i].SVersion = pkg.Version
processedPkgList[i].Condition = pkg.Condition
processedPkgList[i].SCondition = pkg.Condition
break
} else {
err = fmt.Errorf("spec (%s) attempted to set more than two conditions for package (%s)", specfile, processedPkg.Name)
return
}
}
}
		if !nameMatch {
processedPkgList = append(processedPkgList, pkg)
}
}
return
}
// parseOrCondition splits a package name like (foo or bar) and returns both foo and bar as separate requirements.
func parseOrCondition(packagename string) (versions []*pkgjson.PackageVer, err error) {
logger.Log.Warnf("'OR' clause found (%s), make sure both packages are available. Please refer to 'docs/how_it_works/3_package_building.md#or-clauses' for explanation of limitations.", packagename)
packagename = strings.ReplaceAll(packagename, "(", "")
packagename = strings.ReplaceAll(packagename, ")", "")
packageSplit := strings.Split(packagename, " or ")
err = minSliceLength(packageSplit, 1)
if err != nil {
return
}
versions = make([]*pkgjson.PackageVer, 0, len(packageSplit))
for _, condition := range packageSplit {
var parsedPkgVers []*pkgjson.PackageVer
parsedPkgVers, err = parsePackageVersions(condition)
if err != nil {
return
}
versions = append(versions, parsedPkgVers...)
}
return
}
// minSliceLength checks that a string slice is >= a minimum length and returns an error
// if the condition is not met.
func minSliceLength(slice []string, minLength int) (err error) {
if len(slice) < minLength {
return fmt.Errorf("slice is not required length (minLength = %d) %+v", minLength, slice)
}
return
}
// filterOutDynamicDependencies removes dynamic RPM dependencies from pkgVers.
// These entries are automatically injected by RPM when processing an SRPM
// and represent an internal RPM feature requirement.
//
// For example, if a SPEC uses a Lua scriptlet, RPM will inject a requirement for
// `rpmlib(BuiltinLuaScripts)` so that future RPM invocations on the SRPM know
// what features it needs to properly handle the package.
//
// These dynamic dependencies are not backed by a real package or a provides, but
// are instead an internal notation of RPM itself. Filter these out from the list of
// requirements of actual packages.
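// For example, a requirement on "rpmlib(CompressedFileNames)" is dropped, while a requirement
// on "glibc" is kept.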
func filterOutDynamicDependencies(pkgVers []*pkgjson.PackageVer) (filteredPkgVers []*pkgjson.PackageVer) {
const dynamicDependencyPrefix = "rpmlib("
for _, req := range pkgVers {
if strings.HasPrefix(req.Name, dynamicDependencyPrefix) {
logger.Log.Debugf("Ignoring dynamic dependency: %s", req.Name)
continue
}
filteredPkgVers = append(filteredPkgVers, req)
}
return
}
| 1 | 14,506 | Maybe say "the spec's %check section" so it's clearer what we mean. | microsoft-CBL-Mariner | go |
@@ -649,7 +649,7 @@ class ColumnSorting extends BasePlugin {
* @returns {Number} Physical row index.
*/
onModifyRow(row, source) {
- if (this.blockPluginTranslation === false && source !== this.pluginName) {
+ if (this.blockPluginTranslation === false && source !== this.pluginName && this.isSorted()) {
const rowInMapper = this.rowsMapper.getValueByIndex(row);
row = rowInMapper === null ? row : rowInMapper;
} | 1 | import {
addClass,
removeClass,
} from '../../helpers/dom/element';
import { isUndefined, isDefined } from '../../helpers/mixed';
import { isObject } from '../../helpers/object';
import { arrayMap } from '../../helpers/array';
import { rangeEach } from '../../helpers/number';
import BasePlugin from '../_base';
import { registerPlugin } from './../../plugins';
import Hooks from '../../pluginHooks';
import { isPressedCtrlKey } from '../../utils/keyStateObserver';
import { ColumnStatesManager } from './columnStatesManager';
import {
getNextSortOrder,
areValidSortStates,
getHeaderSpanElement,
isFirstLevelColumnHeader
} from './utils';
import { getClassedToRemove, getClassesToAdd } from './domHelpers';
import RowsMapper from './rowsMapper';
import { rootComparator } from './rootComparator';
import { registerRootComparator, sort } from './sortService';
const APPEND_COLUMN_CONFIG_STRATEGY = 'append';
const REPLACE_COLUMN_CONFIG_STRATEGY = 'replace';
const PLUGIN_KEY = 'columnSorting';
registerRootComparator(PLUGIN_KEY, rootComparator);
Hooks.getSingleton().register('beforeColumnSort');
Hooks.getSingleton().register('afterColumnSort');
// DIFF - MultiColumnSorting & ColumnSorting: changed configuration documentation.
/**
* @plugin ColumnSorting
*
* @description
* This plugin sorts the view by columns (but does not sort the data source!). To enable the plugin, set the
* {@link Options#columnSorting} property to the correct value (see the examples below).
*
* @example
* ```js
* // as boolean
* columnSorting: true
*
* // as an object with initial sort config (sort ascending for column at index 1)
* columnSorting: {
* initialConfig: {
* column: 1,
* sortOrder: 'asc'
* }
* }
*
* // as an object which define specific sorting options for all columns
* columnSorting: {
* sortEmptyCells: true, // true = the table sorts empty cells, false = the table moves all empty cells to the end of the table (by default)
* indicator: true, // true = shows indicator for all columns (by default), false = don't show indicator for columns
* headerAction: true, // true = allow to click on the headers to sort (by default), false = turn off possibility to click on the headers to sort
* compareFunctionFactory: function(sortOrder, columnMeta) {
* return function(value, nextValue) {
* // Some value comparisons which will return -1, 0 or 1...
* }
* }
* }
*
 * // as an object passed to the `column` property, allows specifying custom options for the desired column.
* // please take a look at documentation of `column` property: https://docs.handsontable.com/pro/Options.html#columns
* columns: [{
* columnSorting: {
* indicator: false, // disable indicator for the first column,
* sortEmptyCells: true,
* headerAction: false, // clicks on the first column won't sort
* compareFunctionFactory: function(sortOrder, columnMeta) {
* return function(value, nextValue) {
* return 0; // Custom compare function for the first column (don't sort)
* }
* }
* }
* }]```
*
* @dependencies ObserveChanges
*/
class ColumnSorting extends BasePlugin {
constructor(hotInstance) {
super(hotInstance);
/**
* Instance of column state manager.
*
* @private
* @type {ColumnStatesManager}
*/
this.columnStatesManager = new ColumnStatesManager();
/**
* Object containing visual row indexes mapped to data source indexes.
*
* @private
* @type {RowsMapper}
*/
this.rowsMapper = new RowsMapper(this);
/**
 * It blocks the plugin translation; this flag is checked inside the `onModifyRow` callback.
*
* @private
* @type {Boolean}
*/
this.blockPluginTranslation = true;
/**
* Cached column properties from plugin like i.e. `indicator`, `headerAction`.
*
* @private
* @type {Map<number, Object>}
*/
this.columnMetaCache = new Map();
/**
* Main settings key designed for the plugin.
*
* @private
* @type {String}
*/
this.pluginKey = PLUGIN_KEY;
}
/**
 * Checks if the plugin is enabled in the Handsontable settings. This method is executed in {@link Hooks#beforeInit}
 * hook and if it returns `true` then the {@link ColumnSorting#enablePlugin} method is called.
*
* @returns {Boolean}
*/
isEnabled() {
return !!(this.hot.getSettings()[this.pluginKey]);
}
/**
* Enables the plugin functionality for this Handsontable instance.
*/
enablePlugin() {
if (this.enabled) {
return;
}
if (isUndefined(this.hot.getSettings().observeChanges)) {
this.enableObserveChangesPlugin();
}
this.addHook('afterTrimRow', () => this.sortByPresetSortStates());
this.addHook('afterUntrimRow', () => this.sortByPresetSortStates());
this.addHook('modifyRow', (row, source) => this.onModifyRow(row, source));
this.addHook('unmodifyRow', (row, source) => this.onUnmodifyRow(row, source));
this.addHook('afterGetColHeader', (column, TH) => this.onAfterGetColHeader(column, TH));
this.addHook('beforeOnCellMouseDown', (event, coords, TD, controller) => this.onBeforeOnCellMouseDown(event, coords, TD, controller));
this.addHook('afterOnCellMouseDown', (event, target) => this.onAfterOnCellMouseDown(event, target));
this.addHook('afterCreateRow', (index, amount) => this.onAfterCreateRow(index, amount));
this.addHook('afterRemoveRow', (index, amount) => this.onAfterRemoveRow(index, amount));
this.addHook('afterInit', () => this.loadOrSortBySettings());
this.addHook('afterLoadData', initialLoad => this.onAfterLoadData(initialLoad));
this.addHook('afterCreateCol', () => this.onAfterCreateCol());
this.addHook('afterRemoveCol', () => this.onAfterRemoveCol());
// TODO: Workaround? It should be refactored / described.
if (this.hot.view) {
this.loadOrSortBySettings();
}
super.enablePlugin();
}
/**
* Disables the plugin functionality for this Handsontable instance.
*/
disablePlugin() {
const clearColHeader = (column, TH) => {
const headerSpanElement = getHeaderSpanElement(TH);
if (isFirstLevelColumnHeader(column, TH) === false || headerSpanElement === null) {
return;
}
this.updateHeaderClasses(headerSpanElement);
};
// Changing header width and removing indicator.
this.hot.addHook('afterGetColHeader', clearColHeader);
this.hot.addHookOnce('afterRender', () => {
this.hot.removeHook('afterGetColHeader', clearColHeader);
});
this.rowsMapper.clearMap();
super.disablePlugin();
}
// DIFF - MultiColumnSorting & ColumnSorting: changed function documentation.
/**
* Sorts the table by chosen columns and orders.
*
* @param {undefined|Object} sortConfig Single column sort configuration. The configuration object contains `column` and `sortOrder` properties.
* First of them contains visual column index, the second one contains sort order (`asc` for ascending, `desc` for descending).
*
 * **Note**: Please keep in mind that every call of the `sort` function sets an entirely new sort order. Previous sort configs aren't preserved.
*
* @example
* ```js
* // sort ascending first visual column
* hot.getPlugin('columnSorting').sort({ column: 0, sortOrder: 'asc' });
* ```
*
* @fires Hooks#beforeColumnSort
* @fires Hooks#afterColumnSort
*/
sort(sortConfig) {
const currentSortConfig = this.getSortConfig();
// We always pass configs defined as an array to `beforeColumnSort` and `afterColumnSort` hooks.
const destinationSortConfigs = this.getNormalizedSortConfigs(sortConfig);
const sortPossible = this.areValidSortConfigs(destinationSortConfigs);
const allowSort = this.hot.runHooks('beforeColumnSort', currentSortConfig, destinationSortConfigs, sortPossible);
if (allowSort === false) {
return;
}
if (sortPossible) {
const translateColumnToPhysical = ({ column: visualColumn, ...restOfProperties }) =>
({ column: this.hot.toPhysicalColumn(visualColumn), ...restOfProperties });
const internalSortStates = arrayMap(destinationSortConfigs, columnSortConfig => translateColumnToPhysical(columnSortConfig));
this.columnStatesManager.setSortStates(internalSortStates);
this.sortByPresetSortStates();
this.saveAllSortSettings();
this.hot.render();
this.hot.view.wt.draw(true); // TODO: Workaround? One test won't pass after removal. It should be refactored / described.
}
this.hot.runHooks('afterColumnSort', currentSortConfig, this.getSortConfig(), sortPossible);
}
/**
* Clear the sort performed on the table.
*/
clearSort() {
this.sort([]);
}
/**
 * Checks if the table is sorted (at least one column has to be sorted).
*
* @returns {Boolean}
*/
isSorted() {
return this.enabled && !this.columnStatesManager.isListOfSortedColumnsEmpty();
}
/**
* Get sort configuration for particular column or for all sorted columns. Objects contain `column` and `sortOrder` properties.
*
* **Note**: Please keep in mind that returned objects expose **visual** column index under the `column` key. They are handled by the `sort` function.
*
* @param {Number} [column] Visual column index.
* @returns {undefined|Object|Array}
*/
getSortConfig(column) {
const translateColumnToVisual = ({ column: physicalColumn, ...restOfProperties }) =>
({ column: this.hot.toVisualColumn(physicalColumn), ...restOfProperties });
if (isDefined(column)) {
const physicalColumn = this.hot.toPhysicalColumn(column);
const columnSortState = this.columnStatesManager.getColumnSortState(physicalColumn);
if (isDefined(columnSortState)) {
return translateColumnToVisual(columnSortState);
}
return;
}
const sortStates = this.columnStatesManager.getSortStates();
return arrayMap(sortStates, columnState => translateColumnToVisual(columnState));
}
/**
* @description
 * Warning: Useful mainly for providing a server-side sort implementation (see the example below). It doesn't sort the data set. It just sets the sort configuration for all sorted columns.
*
* @example
* ```js
* beforeColumnSort: function(currentSortConfig, destinationSortConfigs) {
* const columnSortPlugin = this.getPlugin('columnSorting');
*
* columnSortPlugin.setSortConfig(destinationSortConfigs);
*
 * // const newData = ... // Calculated data set, i.e. from an AJAX call.
*
* // this.loadData(newData); // Load new data set.
*
* return false; // The blockade for the default sort action.
* }```
*
* @param {undefined|Object|Array} sortConfig Single column sort configuration or full sort configuration (for all sorted columns).
* The configuration object contains `column` and `sortOrder` properties. First of them contains visual column index, the second one contains
* sort order (`asc` for ascending, `desc` for descending).
*/
setSortConfig(sortConfig) {
// We always set configs defined as an array.
const destinationSortConfigs = this.getNormalizedSortConfigs(sortConfig);
if (this.areValidSortConfigs(destinationSortConfigs)) {
const translateColumnToPhysical = ({ column: visualColumn, ...restOfProperties }) =>
({ column: this.hot.toPhysicalColumn(visualColumn), ...restOfProperties });
const internalSortStates = arrayMap(destinationSortConfigs, columnSortConfig => translateColumnToPhysical(columnSortConfig));
this.columnStatesManager.setSortStates(internalSortStates);
}
}
/**
* Get normalized sort configs.
*
* @private
* @param {Object|Array} [sortConfig=[]] Single column sort configuration or full sort configuration (for all sorted columns).
* The configuration object contains `column` and `sortOrder` properties. First of them contains visual column index, the second one contains
* sort order (`asc` for ascending, `desc` for descending).
* @returns {Array}
*/
getNormalizedSortConfigs(sortConfig = []) {
if (Array.isArray(sortConfig)) {
return sortConfig.slice(0, 1);
}
return [sortConfig].slice(0, 1);
}
/**
 * Checks whether the sort configs are valid.
*
* @private
* @param {Array} sortConfigs Sort configuration for all sorted columns. Objects contain `column` and `sortOrder` properties.
* @returns {Boolean}
*/
areValidSortConfigs(sortConfigs) {
if (Array.isArray(sortConfigs) === false) {
return false;
}
const sortedColumns = sortConfigs.map(({ column }) => column);
const numberOfColumns = this.hot.countCols();
const onlyExistingVisualIndexes = sortedColumns.every(visualColumn =>
visualColumn <= numberOfColumns && visualColumn >= 0);
return areValidSortStates(sortConfigs) && onlyExistingVisualIndexes; // We don't translate visual indexes to physical indexes.
}
/**
* Saves all sorting settings. Saving works only when {@link Options#persistentState} option is enabled.
*
* @private
* @fires Hooks#persistentStateSave
*/
saveAllSortSettings() {
const allSortSettings = this.columnStatesManager.getAllColumnsProperties();
allSortSettings.initialConfig = this.columnStatesManager.getSortStates();
this.hot.runHooks('persistentStateSave', 'columnSorting', allSortSettings);
}
/**
* Get all saved sorting settings. Loading works only when {@link Options#persistentState} option is enabled.
*
* @private
* @returns {Object} Previously saved sort settings.
*
* @fires Hooks#persistentStateLoad
*/
getAllSavedSortSettings() {
const storedAllSortSettings = {};
this.hot.runHooks('persistentStateLoad', 'columnSorting', storedAllSortSettings);
const allSortSettings = storedAllSortSettings.value;
const translateColumnToVisual = ({ column: physicalColumn, ...restOfProperties }) =>
({ column: this.hot.toVisualColumn(physicalColumn), ...restOfProperties });
if (isDefined(allSortSettings) && Array.isArray(allSortSettings.initialConfig)) {
allSortSettings.initialConfig = arrayMap(allSortSettings.initialConfig, translateColumnToVisual);
}
return allSortSettings;
}
/**
 * Get next sort configuration for particular column. The object contains `column` and `sortOrder` properties.
*
* **Note**: Please keep in mind that returned object expose **visual** column index under the `column` key.
*
* @private
* @param {Number} column Visual column index.
* @returns {undefined|Object}
*/
getColumnNextConfig(column) {
const physicalColumn = this.hot.toPhysicalColumn(column);
if (this.columnStatesManager.isColumnSorted(physicalColumn)) {
const columnSortConfig = this.getSortConfig(column);
const sortOrder = getNextSortOrder(columnSortConfig.sortOrder);
if (isDefined(sortOrder)) {
columnSortConfig.sortOrder = sortOrder;
return columnSortConfig;
}
return;
}
const nrOfColumns = this.hot.countCols();
if (Number.isInteger(column) && column >= 0 && column < nrOfColumns) {
return {
column,
sortOrder: getNextSortOrder()
};
}
}
/**
* Get sort configuration with "next order" for particular column.
*
* @private
* @param {Number} columnToChange Visual column index of column which order will be changed.
* @param {String} strategyId ID of strategy. Possible values: 'append' and 'replace'. The first one
* change order of particular column and change it's position in the sort queue to the last one. The second one
* just change order of particular column.
*
* **Note**: Please keep in mind that returned objects expose **visual** column index under the `column` key.
*
* @returns {Array}
*/
getNextSortConfig(columnToChange, strategyId = APPEND_COLUMN_CONFIG_STRATEGY) {
const physicalColumn = this.hot.toPhysicalColumn(columnToChange);
const indexOfColumnToChange = this.columnStatesManager.getIndexOfColumnInSortQueue(physicalColumn);
const isColumnSorted = this.columnStatesManager.isColumnSorted(physicalColumn);
const currentSortConfig = this.getSortConfig();
const nextColumnConfig = this.getColumnNextConfig(columnToChange);
if (isColumnSorted) {
if (isUndefined(nextColumnConfig)) {
return [...currentSortConfig.slice(0, indexOfColumnToChange), ...currentSortConfig.slice(indexOfColumnToChange + 1)];
}
if (strategyId === APPEND_COLUMN_CONFIG_STRATEGY) {
return [...currentSortConfig.slice(0, indexOfColumnToChange), ...currentSortConfig.slice(indexOfColumnToChange + 1), nextColumnConfig];
} else if (strategyId === REPLACE_COLUMN_CONFIG_STRATEGY) {
return [...currentSortConfig.slice(0, indexOfColumnToChange), nextColumnConfig, ...currentSortConfig.slice(indexOfColumnToChange + 1)];
}
}
if (isDefined(nextColumnConfig)) {
return currentSortConfig.concat(nextColumnConfig);
}
return currentSortConfig;
}
/**
 * Saves to the cache a part of the plugin-related properties, properly merged from cascading settings.
*
* @private
* @param {Number} column Visual column index.
* @returns {Object}
*/
// TODO: Workaround. Inheriting of non-primitive cell meta values doesn't work. Using this function we don't compute
// merged properties multiple times.
setMergedPluginSettings(column) {
const physicalColumnIndex = this.hot.toPhysicalColumn(column);
const pluginMainSettings = this.hot.getSettings()[this.pluginKey];
const storedColumnProperties = this.columnStatesManager.getAllColumnsProperties();
const cellMeta = this.hot.getCellMeta(0, column);
const columnMeta = Object.getPrototypeOf(cellMeta);
const columnMetaHasPluginSettings = Object.hasOwnProperty.call(columnMeta, this.pluginKey);
const pluginColumnConfig = columnMetaHasPluginSettings ? columnMeta[this.pluginKey] : {};
this.columnMetaCache.set(physicalColumnIndex, Object.assign(storedColumnProperties, pluginMainSettings, pluginColumnConfig));
}
/**
* Get copy of settings for first cell in the column.
*
* @private
* @param {Number} column Visual column index.
* @returns {Object}
*/
// TODO: Workaround. Inheriting of non-primitive cell meta values doesn't work. Instead of getting properties from
// column meta we call this function.
getFirstCellSettings(column) {
// TODO: Remove test named: "should not break the dataset when inserted new row" (#5431).
const actualBlockTranslationFlag = this.blockPluginTranslation;
this.blockPluginTranslation = true;
if (this.columnMetaCache.size === 0) {
const numberOfColumns = this.hot.countCols();
rangeEach(numberOfColumns, visualColumnIndex => this.setMergedPluginSettings(visualColumnIndex));
}
const cellMeta = this.hot.getCellMeta(0, column);
this.blockPluginTranslation = actualBlockTranslationFlag;
const cellMetaCopy = Object.create(cellMeta);
cellMetaCopy[this.pluginKey] = this.columnMetaCache.get(this.hot.toPhysicalColumn(column));
return cellMetaCopy;
}
/**
* Get number of rows which should be sorted.
*
* @private
* @param {Number} numberOfRows Total number of displayed rows.
* @returns {Number}
*/
getNumberOfRowsToSort(numberOfRows) {
const settings = this.hot.getSettings();
// `maxRows` option doesn't take into account `minSpareRows` option in this case.
if (settings.maxRows <= numberOfRows) {
return settings.maxRows;
}
return numberOfRows - settings.minSpareRows;
}
/**
 * Performs the sorting using a stable sort function, based on the internal sorting state.
*
* @private
*/
sortByPresetSortStates() {
if (this.columnStatesManager.isListOfSortedColumnsEmpty()) {
this.rowsMapper.clearMap();
return;
}
const indexesWithData = [];
const sortedColumnsList = this.columnStatesManager.getSortedColumns();
const numberOfRows = this.hot.countRows();
// Function `getDataAtCell` won't call the indices translation inside `onModifyRow` callback - we check the `blockPluginTranslation`
// flag inside it (we just want to get data not already modified by `columnSorting` plugin translation).
this.blockPluginTranslation = true;
const getDataForSortedColumns = visualRowIndex =>
arrayMap(sortedColumnsList, physicalColumn => this.hot.getDataAtCell(visualRowIndex, this.hot.toVisualColumn(physicalColumn)));
for (let visualRowIndex = 0; visualRowIndex < this.getNumberOfRowsToSort(numberOfRows); visualRowIndex += 1) {
indexesWithData.push([visualRowIndex].concat(getDataForSortedColumns(visualRowIndex)));
}
sort(
indexesWithData,
this.pluginKey,
arrayMap(sortedColumnsList, physicalColumn => this.columnStatesManager.getSortOrderOfColumn(physicalColumn)),
arrayMap(sortedColumnsList, physicalColumn => this.getFirstCellSettings(this.hot.toVisualColumn(physicalColumn)))
);
// Append spareRows
for (let visualRowIndex = indexesWithData.length; visualRowIndex < numberOfRows; visualRowIndex += 1) {
indexesWithData.push([visualRowIndex].concat(getDataForSortedColumns(visualRowIndex)));
}
// The blockade of the indices translation is released.
this.blockPluginTranslation = false;
// Save all indexes to arrayMapper, a completely new sequence is set by the plugin
this.rowsMapper._arrayMap = arrayMap(indexesWithData, indexWithData => indexWithData[0]);
}
/**
* Load saved settings or sort by predefined plugin configuration.
*
* @private
*/
loadOrSortBySettings() {
this.columnMetaCache.clear();
const storedAllSortSettings = this.getAllSavedSortSettings();
if (isObject(storedAllSortSettings)) {
this.sortBySettings(storedAllSortSettings);
} else {
const allSortSettings = this.hot.getSettings()[this.pluginKey];
this.sortBySettings(allSortSettings);
}
}
/**
* Sort the table by provided configuration.
*
* @private
* @param {Object} allSortSettings All sort config settings. Object may contain `initialConfig`, `indicator`,
* `sortEmptyCells`, `headerAction` and `compareFunctionFactory` properties.
*/
sortBySettings(allSortSettings) {
if (isObject(allSortSettings)) {
this.columnStatesManager.updateAllColumnsProperties(allSortSettings);
const initialConfig = allSortSettings.initialConfig;
if (Array.isArray(initialConfig) || isObject(initialConfig)) {
this.sort(initialConfig);
}
} else {
// Extra render for headers. Their width may change.
this.hot.render();
}
}
/**
* Enables the ObserveChanges plugin.
*
* @private
*/
enableObserveChangesPlugin() {
const _this = this;
this.hot._registerTimeout(
setTimeout(() => {
_this.hot.updateSettings({
observeChanges: true
});
}, 0));
}
/**
* Callback for `modifyRow` hook. Translates visual row index to the sorted row index.
*
* @private
* @param {Number} row Visual row index.
* @returns {Number} Physical row index.
*/
onModifyRow(row, source) {
if (this.blockPluginTranslation === false && source !== this.pluginName) {
const rowInMapper = this.rowsMapper.getValueByIndex(row);
row = rowInMapper === null ? row : rowInMapper;
}
return row;
}
/**
* Callback for `unmodifyRow` hook. Translates sorted row index to visual row index.
*
* @private
* @param {Number} row Physical row index.
* @returns {Number} Visual row index.
*/
onUnmodifyRow(row, source) {
if (this.blockPluginTranslation === false && source !== this.pluginName) {
row = this.rowsMapper.getIndexByValue(row);
}
return row;
}
/**
* Callback for the `onAfterGetColHeader` hook. Adds column sorting CSS classes.
*
* @private
* @param {Number} column Visual column index.
* @param {Element} TH TH HTML element.
*/
onAfterGetColHeader(column, TH) {
const headerSpanElement = getHeaderSpanElement(TH);
if (isFirstLevelColumnHeader(column, TH) === false || headerSpanElement === null) {
return;
}
const physicalColumn = this.hot.toPhysicalColumn(column);
const pluginSettingsForColumn = this.getFirstCellSettings(column)[this.pluginKey];
const showSortIndicator = pluginSettingsForColumn.indicator;
const headerActionEnabled = pluginSettingsForColumn.headerAction;
this.updateHeaderClasses(headerSpanElement, this.columnStatesManager, physicalColumn, showSortIndicator, headerActionEnabled);
}
/**
* Update header classes.
*
* @private
* @param {HTMLElement} headerSpanElement Header span element.
* @param {...*} args Extra arguments for helpers.
*/
updateHeaderClasses(headerSpanElement, ...args) {
removeClass(headerSpanElement, getClassedToRemove(headerSpanElement));
if (this.enabled !== false) {
addClass(headerSpanElement, getClassesToAdd(...args));
}
}
/**
* Overwriting base plugin's `onUpdateSettings` method. Please keep in mind that `onAfterUpdateSettings` isn't called
* for `updateSettings` in specific situations.
*
* @private
* @param {Object} newSettings New settings object.
*/
onUpdateSettings(newSettings) {
super.onUpdateSettings();
this.columnMetaCache.clear();
if (isDefined(newSettings[this.pluginKey])) {
this.sortBySettings(newSettings[this.pluginKey]);
}
}
/**
* Callback for the `afterLoadData` hook.
*
* @private
* @param {Boolean} initialLoad flag that determines whether the data has been loaded during the initialization.
*/
onAfterLoadData(initialLoad) {
this.rowsMapper.clearMap();
if (initialLoad === true) {
// TODO: Workaround? It should be refactored / described.
if (this.hot.view) {
this.loadOrSortBySettings();
}
}
}
/**
* Callback for the `afterCreateRow` hook.
*
* @private
* @param {Number} index Visual index of the created row.
* @param {Number} amount Amount of created rows.
*/
onAfterCreateRow(index, amount) {
this.rowsMapper.shiftItems(index, amount);
}
/**
* Callback for the `afterRemoveRow` hook.
*
* @private
* @param {Number} removedRows Visual indexes of the removed row.
* @param {Number} amount Amount of removed rows.
*/
onAfterRemoveRow(removedRows, amount) {
this.rowsMapper.unshiftItems(removedRows, amount);
}
// TODO: Workaround. Inheriting of non-primitive cell meta values doesn't work. We clear the cache after action which reorganize sequence of columns.
// TODO: Remove test named: "should add new columns properly when the `columnSorting` plugin is enabled (inheriting of non-primitive cell meta values)".
/**
* Callback for the `afterCreateCol` hook.
*
* @private
*/
onAfterCreateCol() {
this.columnMetaCache.clear();
}
// TODO: Workaround. Inheriting of non-primitive cell meta values doesn't work. We clear the cache after action which reorganize sequence of columns.
// TODO: Remove test named: "should add new columns properly when the `columnSorting` plugin is enabled (inheriting of non-primitive cell meta values)".
/**
* Callback for the `afterRemoveCol` hook.
*
* @private
*/
onAfterRemoveCol() {
this.columnMetaCache.clear();
}
/**
* Indicates if clickable header was clicked.
*
* @private
* @param {MouseEvent} event The `mousedown` event.
* @param {Number} column Visual column index.
* @returns {Boolean}
*/
wasClickableHeaderClicked(event, column) {
const pluginSettingsForColumn = this.getFirstCellSettings(column)[this.pluginKey];
const headerActionEnabled = pluginSettingsForColumn.headerAction;
return headerActionEnabled && event.realTarget.nodeName === 'SPAN';
}
/**
* Changes the behavior of selection / dragging.
*
* @private
* @param {MouseEvent} event The `mousedown` event.
* @param {CellCoords} coords Visual coordinates.
* @param {HTMLElement} TD
* @param {Object} blockCalculations
*/
onBeforeOnCellMouseDown(event, coords, TD, blockCalculations) {
// Click below the level of column headers
if (coords.row >= 0 || coords.col < 0) {
return;
}
if (this.wasClickableHeaderClicked(event, coords.col) && isPressedCtrlKey()) {
blockCalculations.column = true;
}
}
/**
* Callback for the `onAfterOnCellMouseDown` hook.
*
* @private
* @param {Event} event Event which is provided by the hook.
* @param {CellCoords} coords Visual coords of the selected cell.
*/
onAfterOnCellMouseDown(event, coords) {
// Click below the level of column headers
if (coords.row >= 0 || coords.col < 0) {
return;
}
if (this.wasClickableHeaderClicked(event, coords.col)) {
if (isPressedCtrlKey()) {
this.hot.deselectCell();
this.hot.selectColumns(coords.col);
}
this.sort(this.getColumnNextConfig(coords.col));
}
}
/**
* Destroys the plugin instance.
*/
destroy() {
this.rowsMapper.destroy();
this.columnStatesManager.destroy();
super.destroy();
}
}
registerPlugin(PLUGIN_KEY, ColumnSorting);
export default ColumnSorting;
| 1 | 15,017 | I think after this change the check `rowInMapper === null` is always false, so it's useless. | handsontable-handsontable | js
@@ -166,7 +166,7 @@ func initClocks() {
nxp.ClockIpLpi2c2.Enable(false) //
nxp.ClockIpLpi2c3.Enable(false) //
nxp.DivIpLpi2c.Div(0) // divide LPI2C_CLK_PODF (DIV1)
- nxp.MuxIpLpi2c.Mux(0) // LPI2C select PLL3_SW_60M
+ nxp.MuxIpLpi2c.Mux(1) // LPI2C select OSC
nxp.ClockIpCan1.Enable(false) // disable CAN
nxp.ClockIpCan2.Enable(false) // | 1 | // +build mimxrt1062
package runtime
import (
"device/nxp"
)
// Core clock frequencies (Hz)
const (
CORE_FREQ = 600000000 // 600 MHz
OSC_FREQ = 24000000 // 24 MHz
)
// Note from Teensyduino (cores/teensy4/startup.c):
//
// | ARM SysTick is used for most Arduino timing functions, delay(), millis(),
// | micros(). SysTick can run from either the ARM core clock, or from an
// | "external" clock. NXP documents it as "24 MHz XTALOSC can be the external
// | clock source of SYSTICK" (RT1052 ref manual, rev 1, page 411). However,
// | NXP actually hid an undocumented divide-by-240 circuit in the hardware, so
// | the external clock is really 100 kHz. We use this clock rather than the
// | ARM clock, to allow SysTick to maintain correct timing even when we change
// | the ARM clock to run at different speeds.
const SYSTICK_FREQ = 100000 // 100 kHz
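// ticksToMicros is an illustrative helper and an assumption of this sketch,
// not part of the original file: it only demonstrates the arithmetic implied
// by the note above, where the hidden divide-by-240 turns the 24 MHz crystal
// into a 100 kHz SysTick clock, i.e. 1e6 / SYSTICK_FREQ = 10 us per tick.
func ticksToMicros(ticks uint64) uint64 {
// multiply the raw tick count by the number of microseconds per tick
return ticks * (1000000 / SYSTICK_FREQ)
}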
var (
ArmPllConfig = nxp.ClockConfigArmPll{
LoopDivider: 100, // PLL loop divider, Fout=Fin*50
Src: 0, // bypass clock source, 0=OSC24M, 1=CLK1_P & CLK1_N
}
SysPllConfig = nxp.ClockConfigSysPll{
LoopDivider: 1, // PLL loop divider, Fout=Fin*(20+LOOP*2+NUMER/DENOM)
Numerator: 0, // 30-bit NUMER of fractional loop divider
Denominator: 1, // 30-bit DENOM of fractional loop divider
Src: 0, // bypass clock source, 0=OSC24M, 1=CLK1_P & CLK1_N
}
Usb1PllConfig = nxp.ClockConfigUsbPll{
Instance: 1, // USB PLL instance
LoopDivider: 0, // PLL loop divider, Fout=Fin*20
Src: 0, // bypass clock source, 0=OSC24M, 1=CLK1_P & CLK1_N
}
Usb2PllConfig = nxp.ClockConfigUsbPll{
Instance: 2, // USB PLL instance
LoopDivider: 0, // PLL loop divider, Fout=Fin*20
Src: 0, // bypass clock source, 0=OSC24M, 1=CLK1_P & CLK1_N
}
)
// initClocks configures the core, buses, and all peripherals' clock source mux
// and dividers for runtime. The clock gates for individual peripherals are all
// disabled prior to configuration and must be enabled afterwards using one of
// these `enable*Clocks()` functions or the respective peripheral clocks'
// `Enable()` method from the "device/nxp" package.
func initClocks() {
// disable low-power mode so that __WFI doesn't lock up at runtime.
// see: Using the MIMXRT1060/4-EVK with MCUXpresso IDE v10.3.x (v1.0.2,
// 2019MAR01), chapter 14
nxp.ClockModeRun.Set()
// enable and use 1MHz clock output
nxp.XTALOSC24M.OSC_CONFIG2.SetBits(nxp.XTALOSC24M_OSC_CONFIG2_ENABLE_1M_Msk)
nxp.XTALOSC24M.OSC_CONFIG2.ClearBits(nxp.XTALOSC24M_OSC_CONFIG2_MUX_1M_Msk)
// initialize external 24 MHz clock
nxp.CCM_ANALOG.MISC0_CLR.Set(nxp.CCM_ANALOG_MISC0_XTAL_24M_PWD_Msk) // power
for !nxp.XTALOSC24M.LOWPWR_CTRL.HasBits(nxp.XTALOSC24M_LOWPWR_CTRL_XTALOSC_PWRUP_STAT_Msk) {
}
nxp.CCM_ANALOG.MISC0_SET.Set(nxp.CCM_ANALOG_MISC0_OSC_XTALOK_EN_Msk) // detect freq
for !nxp.CCM_ANALOG.MISC0.HasBits(nxp.CCM_ANALOG_MISC0_OSC_XTALOK_Msk) {
}
nxp.CCM_ANALOG.MISC0_CLR.Set(nxp.CCM_ANALOG_MISC0_OSC_XTALOK_EN_Msk)
// initialize internal RC OSC 24 MHz, and switch clock source to external OSC
nxp.XTALOSC24M.LOWPWR_CTRL.SetBits(nxp.XTALOSC24M_LOWPWR_CTRL_RC_OSC_EN_Msk)
nxp.XTALOSC24M.LOWPWR_CTRL_CLR.Set(nxp.XTALOSC24M_LOWPWR_CTRL_CLR_OSC_SEL_Msk)
// set oscillator ready counter value
nxp.CCM.CCR.Set((nxp.CCM.CCR.Get() & ^uint32(nxp.CCM_CCR_OSCNT_Msk)) |
((127 << nxp.CCM_CCR_OSCNT_Pos) & nxp.CCM_CCR_OSCNT_Msk))
// set PERIPH2_CLK and PERIPH to provide stable clock before PLLs are initialized
nxp.MuxIpPeriphClk2.Mux(1) // PERIPH_CLK2 select OSC24M
nxp.MuxIpPeriph.Mux(1) // PERIPH select PERIPH_CLK2
// set VDD_SOC to 1.275V, necessary to config AHB to 600 MHz
nxp.DCDC.REG3.Set((nxp.DCDC.REG3.Get() & ^uint32(nxp.DCDC_REG3_TRG_Msk)) |
((13 << nxp.DCDC_REG3_TRG_Pos) & nxp.DCDC_REG3_TRG_Msk))
// wait until DCDC_STS_DC_OK bit is asserted
for !nxp.DCDC.REG0.HasBits(nxp.DCDC_REG0_STS_DC_OK_Msk) {
}
nxp.DivIpAhb.Div(0) // divide AHB_PODF (DIV1)
nxp.ClockIpAdc1.Enable(false) // disable ADC
nxp.ClockIpAdc2.Enable(false) //
nxp.ClockIpXbar1.Enable(false) // disable XBAR
nxp.ClockIpXbar2.Enable(false) //
nxp.ClockIpXbar3.Enable(false) //
nxp.DivIpIpg.Div(3) // divide IPG_PODF (DIV4)
nxp.DivIpArm.Div(1) // divide ARM_PODF (DIV2)
nxp.DivIpPeriphClk2.Div(0) // divide PERIPH_CLK2_PODF (DIV1)
nxp.ClockIpGpt1.Enable(false) // disable GPT/PIT
nxp.ClockIpGpt1S.Enable(false) //
nxp.ClockIpGpt2.Enable(false) //
nxp.ClockIpGpt2S.Enable(false) //
nxp.ClockIpPit.Enable(false) //
nxp.DivIpPerclk.Div(0) // divide PERCLK_PODF (DIV1)
nxp.ClockIpUsdhc1.Enable(false) // disable USDHC1
nxp.DivIpUsdhc1.Div(1) // divide USDHC1_PODF (DIV2)
nxp.MuxIpUsdhc1.Mux(1) // USDHC1 select PLL2_PFD0
nxp.ClockIpUsdhc2.Enable(false) // disable USDHC2
nxp.DivIpUsdhc2.Div(1) // divide USDHC2_PODF (DIV2)
nxp.MuxIpUsdhc2.Mux(1) // USDHC2 select PLL2_PFD0
nxp.ClockIpSemc.Enable(false) // disable SEMC
nxp.DivIpSemc.Div(1) // divide SEMC_PODF (DIV2)
nxp.MuxIpSemcAlt.Mux(0) // SEMC_ALT select PLL2_PFD2
nxp.MuxIpSemc.Mux(1) // SEMC select SEMC_ALT
if false {
// TODO: external flash is on this bus, configured via DCD block
nxp.ClockIpFlexSpi.Enable(false) // disable FLEXSPI
nxp.DivIpFlexSpi.Div(0) // divide FLEXSPI_PODF (DIV1)
nxp.MuxIpFlexSpi.Mux(2) // FLEXSPI select PLL2_PFD2
}
nxp.ClockIpFlexSpi2.Enable(false) // disable FLEXSPI2
nxp.DivIpFlexSpi2.Div(0) // divide FLEXSPI2_PODF (DIV1)
nxp.MuxIpFlexSpi2.Mux(0) // FLEXSPI2 select PLL2_PFD2
nxp.ClockIpCsi.Enable(false) // disable CSI
nxp.DivIpCsi.Div(1) // divide CSI_PODF (DIV2)
nxp.MuxIpCsi.Mux(0) // CSI select OSC24M
nxp.ClockIpLpspi1.Enable(false) // disable LPSPI
nxp.ClockIpLpspi2.Enable(false) //
nxp.ClockIpLpspi3.Enable(false) //
nxp.ClockIpLpspi4.Enable(false) //
nxp.DivIpLpspi.Div(3) // divide LPSPI_PODF (DIV4)
nxp.MuxIpLpspi.Mux(2) // LPSPI select PLL2
nxp.ClockIpTrace.Enable(false) // disable TRACE
nxp.DivIpTrace.Div(3) // divide TRACE_PODF (DIV4)
nxp.MuxIpTrace.Mux(0) // TRACE select PLL2_MAIN
nxp.ClockIpSai1.Enable(false) // disable SAI1
nxp.DivIpSai1Pre.Div(3) // divide SAI1_CLK_PRED (DIV4)
nxp.DivIpSai1.Div(1) // divide SAI1_CLK_PODF (DIV2)
nxp.MuxIpSai1.Mux(0) // SAI1 select PLL3_PFD2
nxp.ClockIpSai2.Enable(false) // disable SAI2
nxp.DivIpSai2Pre.Div(3) // divide SAI2_CLK_PRED (DIV4)
nxp.DivIpSai2.Div(1) // divide SAI2_CLK_PODF (DIV2)
nxp.MuxIpSai2.Mux(0) // SAI2 select PLL3_PFD2
nxp.ClockIpSai3.Enable(false) // disable SAI3
nxp.DivIpSai3Pre.Div(3) // divide SAI3_CLK_PRED (DIV4)
nxp.DivIpSai3.Div(1) // divide SAI3_CLK_PODF (DIV2)
nxp.MuxIpSai3.Mux(0) // SAI3 select PLL3_PFD2
nxp.ClockIpLpi2c1.Enable(false) // disable LPI2C
nxp.ClockIpLpi2c2.Enable(false) //
nxp.ClockIpLpi2c3.Enable(false) //
nxp.DivIpLpi2c.Div(0) // divide LPI2C_CLK_PODF (DIV1)
nxp.MuxIpLpi2c.Mux(0) // LPI2C select PLL3_SW_60M
nxp.ClockIpCan1.Enable(false) // disable CAN
nxp.ClockIpCan2.Enable(false) //
nxp.ClockIpCan3.Enable(false) //
nxp.ClockIpCan1S.Enable(false) //
nxp.ClockIpCan2S.Enable(false) //
nxp.ClockIpCan3S.Enable(false) //
nxp.DivIpCan.Div(1) // divide CAN_CLK_PODF (DIV2)
nxp.MuxIpCan.Mux(2) // CAN select PLL3_SW_80M
nxp.ClockIpLpuart1.Enable(false) // disable UART
nxp.ClockIpLpuart2.Enable(false) //
nxp.ClockIpLpuart3.Enable(false) //
nxp.ClockIpLpuart4.Enable(false) //
nxp.ClockIpLpuart5.Enable(false) //
nxp.ClockIpLpuart6.Enable(false) //
nxp.ClockIpLpuart7.Enable(false) //
nxp.ClockIpLpuart8.Enable(false) //
nxp.DivIpUart.Div(0) // divide UART_CLK_PODF (DIV1)
nxp.MuxIpUart.Mux(1) // UART select OSC
nxp.ClockIpLcdPixel.Enable(false) // disable LCDIF
nxp.DivIpLcdifPre.Div(1) // divide LCDIF_PRED (DIV2)
nxp.DivIpLcdif.Div(3) // divide LCDIF_CLK_PODF (DIV4)
nxp.MuxIpLcdifPre.Mux(5) // LCDIF_PRE select PLL3_PFD1
nxp.ClockIpSpdif.Enable(false) // disable SPDIF
nxp.DivIpSpdif0Pre.Div(1) // divide SPDIF0_CLK_PRED (DIV2)
nxp.DivIpSpdif0.Div(7) // divide SPDIF0_CLK_PODF (DIV8)
nxp.MuxIpSpdif.Mux(3) // SPDIF select PLL3_SW
nxp.ClockIpFlexio1.Enable(false) // disable FLEXIO1
nxp.DivIpFlexio1Pre.Div(1) // divide FLEXIO1_CLK_PRED (DIV2)
nxp.DivIpFlexio1.Div(7) // divide FLEXIO1_CLK_PODF (DIV8)
nxp.MuxIpFlexio1.Mux(3) // FLEXIO1 select PLL3_SW
nxp.ClockIpFlexio2.Enable(false) // disable FLEXIO2
nxp.DivIpFlexio2Pre.Div(1) // divide FLEXIO2_CLK_PRED (DIV2)
nxp.DivIpFlexio2.Div(7) // divide FLEXIO2_CLK_PODF (DIV8)
nxp.MuxIpFlexio2.Mux(3) // FLEXIO2 select PLL3_SW
nxp.MuxIpPll3Sw.Mux(0) // PLL3_SW select PLL3_MAIN
ArmPllConfig.Configure() // init ARM PLL
// SYS PLL (PLL2) @ 528 MHz
// PFD0 = 396 MHz -> USDHC1/USDHC2(DIV2)=198 MHz
// PFD1 = 594 MHz -> (currently unused)
// PFD2 = 327.72 MHz -> SEMC(DIV2)=163.86 MHz, FlexSPI/FlexSPI2=327.72 MHz
// PFD3 = 454.73 MHz -> (currently unused)
SysPllConfig.Configure(24, 16, 29, 16) // init SYS PLL and PFDs
// USB1 PLL (PLL3) @ 480 MHz
// PFD0 -> (currently unused)
// PFD1 -> (currently unused)
// PFD2 -> (currently unused)
// PFD3 -> (currently unused)
Usb1PllConfig.Configure() // init USB1 PLL and PFDs
Usb2PllConfig.Configure() // init USB2 PLL
nxp.MuxIpPrePeriph.Mux(3) // PRE_PERIPH select ARM_PLL
nxp.MuxIpPeriph.Mux(0) // PERIPH select PRE_PERIPH
nxp.MuxIpPeriphClk2.Mux(1) // PERIPH_CLK2 select OSC
nxp.MuxIpPerclk.Mux(1) // PERCLK select OSC
// set LVDS1 clock source
nxp.CCM_ANALOG.MISC1.Set((nxp.CCM_ANALOG.MISC1.Get() & ^uint32(nxp.CCM_ANALOG_MISC1_LVDS1_CLK_SEL_Msk)) |
((0 << nxp.CCM_ANALOG_MISC1_LVDS1_CLK_SEL_Pos) & nxp.CCM_ANALOG_MISC1_LVDS1_CLK_SEL_Msk))
// set CLOCK_OUT1 divider
nxp.CCM.CCOSR.Set((nxp.CCM.CCOSR.Get() & ^uint32(nxp.CCM_CCOSR_CLKO1_DIV_Msk)) |
((0 << nxp.CCM_CCOSR_CLKO1_DIV_Pos) & nxp.CCM_CCOSR_CLKO1_DIV_Msk))
// set CLOCK_OUT1 source
nxp.CCM.CCOSR.Set((nxp.CCM.CCOSR.Get() & ^uint32(nxp.CCM_CCOSR_CLKO1_SEL_Msk)) |
((1 << nxp.CCM_CCOSR_CLKO1_SEL_Pos) & nxp.CCM_CCOSR_CLKO1_SEL_Msk))
// set CLOCK_OUT2 divider
nxp.CCM.CCOSR.Set((nxp.CCM.CCOSR.Get() & ^uint32(nxp.CCM_CCOSR_CLKO2_DIV_Msk)) |
((0 << nxp.CCM_CCOSR_CLKO2_DIV_Pos) & nxp.CCM_CCOSR_CLKO2_DIV_Msk))
// set CLOCK_OUT2 source
nxp.CCM.CCOSR.Set((nxp.CCM.CCOSR.Get() & ^uint32(nxp.CCM_CCOSR_CLKO2_SEL_Msk)) |
((18 << nxp.CCM_CCOSR_CLKO2_SEL_Pos) & nxp.CCM_CCOSR_CLKO2_SEL_Msk))
nxp.CCM.CCOSR.ClearBits(nxp.CCM_CCOSR_CLK_OUT_SEL_Msk) // set CLK_OUT1 drives CLK_OUT
nxp.CCM.CCOSR.SetBits(nxp.CCM_CCOSR_CLKO1_EN_Msk) // enable CLK_OUT1
nxp.CCM.CCOSR.SetBits(nxp.CCM_CCOSR_CLKO2_EN_Msk) // enable CLK_OUT2
nxp.ClockIpIomuxcGpr.Enable(false) // disable IOMUXC_GPR
nxp.ClockIpIomuxc.Enable(false) // disable IOMUXC
// set GPT1 High frequency reference clock source
nxp.IOMUXC_GPR.GPR5.ClearBits(nxp.IOMUXC_GPR_GPR5_VREF_1M_CLK_GPT1_Msk)
// set GPT2 High frequency reference clock source
nxp.IOMUXC_GPR.GPR5.ClearBits(nxp.IOMUXC_GPR_GPR5_VREF_1M_CLK_GPT2_Msk)
nxp.ClockIpGpio1.Enable(false) // disable GPIO
nxp.ClockIpGpio2.Enable(false) //
nxp.ClockIpGpio3.Enable(false) //
nxp.ClockIpGpio4.Enable(false) //
}
func enableTimerClocks() {
nxp.ClockIpGpt1.Enable(true) // enable GPT/PIT
nxp.ClockIpGpt1S.Enable(true) //
nxp.ClockIpGpt2.Enable(true) //
nxp.ClockIpGpt2S.Enable(true) //
nxp.ClockIpPit.Enable(true) //
}
func enablePinClocks() {
nxp.ClockIpIomuxcGpr.Enable(true) // enable IOMUXC
nxp.ClockIpIomuxc.Enable(true) //
nxp.ClockIpGpio1.Enable(true) // enable GPIO
nxp.ClockIpGpio2.Enable(true) //
nxp.ClockIpGpio3.Enable(true) //
nxp.ClockIpGpio4.Enable(true) //
}
func enablePeripheralClocks() {
nxp.ClockIpAdc1.Enable(true) // enable ADC
nxp.ClockIpAdc2.Enable(true) //
nxp.ClockIpXbar1.Enable(true) // enable XBAR
nxp.ClockIpXbar2.Enable(true) //
nxp.ClockIpXbar3.Enable(true) //
nxp.ClockIpUsdhc1.Enable(true) // enable USDHC
nxp.ClockIpUsdhc2.Enable(true) //
nxp.ClockIpSemc.Enable(true) // enable SEMC
nxp.ClockIpFlexSpi2.Enable(true) // enable FLEXSPI2
nxp.ClockIpLpspi1.Enable(true) // enable LPSPI
nxp.ClockIpLpspi2.Enable(true) //
nxp.ClockIpLpspi3.Enable(true) //
nxp.ClockIpLpspi4.Enable(true) //
nxp.ClockIpLpi2c1.Enable(true) // enable LPI2C
nxp.ClockIpLpi2c2.Enable(true) //
nxp.ClockIpLpi2c3.Enable(true) //
nxp.ClockIpCan1.Enable(true) // enable CAN
nxp.ClockIpCan2.Enable(true) //
nxp.ClockIpCan3.Enable(true) //
nxp.ClockIpCan1S.Enable(true) //
nxp.ClockIpCan2S.Enable(true) //
nxp.ClockIpCan3S.Enable(true) //
nxp.ClockIpLpuart1.Enable(true) // enable UART
nxp.ClockIpLpuart2.Enable(true) //
nxp.ClockIpLpuart3.Enable(true) //
nxp.ClockIpLpuart4.Enable(true) //
nxp.ClockIpLpuart5.Enable(true) //
nxp.ClockIpLpuart6.Enable(true) //
nxp.ClockIpLpuart7.Enable(true) //
nxp.ClockIpLpuart8.Enable(true) //
nxp.ClockIpFlexio1.Enable(true) // enable FLEXIO
nxp.ClockIpFlexio2.Enable(true) //
}
| 1 | 11,118 | ClockIpLpi2c4.Enable() is required. The same change is required for enablePeripheralClocks(). | tinygo-org-tinygo | go |
@@ -67,6 +67,9 @@ type PrometheusSpec struct {
Version string `json:"version,omitempty"`
// Tag of Prometheus container image to be deployed. Defaults to the value of `version`.
Tag string `json:"tag,omitempty"`
+ // Sha of Prometheus container image to be deployed. Defaults to the value of `version`.
+ // Similar to a tag, but the sha explicitly deploys an immutable container image.
+ Sha string `json:"sha,omitempty"`
// When a Prometheus deployment is paused, no actions except for deletion
// will be performed on the underlying objects.
Paused bool `json:"paused,omitempty"` | 1 | // Copyright 2016 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
// Prometheus defines a Prometheus deployment.
// +k8s:openapi-gen=true
type Prometheus struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// +k8s:openapi-gen=false
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of the Prometheus cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Spec PrometheusSpec `json:"spec"`
// Most recent observed status of the Prometheus cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Status *PrometheusStatus `json:"status,omitempty"`
}
// PrometheusList is a list of Prometheuses.
// +k8s:openapi-gen=true
type PrometheusList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of Prometheuses
Items []*Prometheus `json:"items"`
}
// PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type PrometheusSpec struct {
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// Metadata Labels and Annotations gets propagated to the prometheus pods.
PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"`
// ServiceMonitors to be selected for target discovery.
ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"`
// Namespaces to be selected for ServiceMonitor discovery. If nil, only
// check own namespace.
ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"`
// Version of Prometheus to be deployed.
Version string `json:"version,omitempty"`
// Tag of Prometheus container image to be deployed. Defaults to the value of `version`.
Tag string `json:"tag,omitempty"`
// When a Prometheus deployment is paused, no actions except for deletion
// will be performed on the underlying objects.
Paused bool `json:"paused,omitempty"`
// Base image to use for a Prometheus deployment.
BaseImage string `json:"baseImage,omitempty"`
// An optional list of references to secrets in the same namespace
// to use for pulling prometheus and alertmanager images from registries
// see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Number of instances to deploy for a Prometheus deployment.
Replicas *int32 `json:"replicas,omitempty"`
// Time duration Prometheus shall retain data for. Default is '24h',
// and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years).
Retention string `json:"retention,omitempty"`
// Log level for Prometheus to be configured with.
LogLevel string `json:"logLevel,omitempty"`
// Interval between consecutive scrapes.
ScrapeInterval string `json:"scrapeInterval,omitempty"`
// Interval between consecutive evaluations.
EvaluationInterval string `json:"evaluationInterval,omitempty"`
// The labels to add to any time series or alerts when communicating with
// external systems (federation, remote storage, Alertmanager).
ExternalLabels map[string]string `json:"externalLabels,omitempty"`
// The external URL the Prometheus instances will be available under. This is
// necessary to generate correct URLs. This is necessary if Prometheus is not
// served from root of a DNS name.
ExternalURL string `json:"externalUrl,omitempty"`
// The route prefix Prometheus registers HTTP handlers for. This is useful,
// if using ExternalURL and a proxy is rewriting HTTP routes of a request,
// and the actual ExternalURL is still true, but the server serves requests
// under a different route prefix. For example for use with `kubectl proxy`.
RoutePrefix string `json:"routePrefix,omitempty"`
// Storage spec to specify how storage shall be used.
Storage *StorageSpec `json:"storage,omitempty"`
// A selector to select which PrometheusRules to mount for loading alerting
// rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus
// Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom
// resources selected by RuleSelector. Make sure it does not match any config
// maps that you do not want to be migrated.
RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"`
// Namespaces to be selected for PrometheusRules discovery. If unspecified, only
// the same namespace as the Prometheus object is in is used.
RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"`
// Define details regarding alerting.
Alerting *AlertingSpec `json:"alerting,omitempty"`
// Define resources requests and limits for single Pods.
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// Define which Nodes the Pods are scheduled on.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// ServiceAccountName is the name of the ServiceAccount to use to run the
// Prometheus Pods.
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// Secrets is a list of Secrets in the same namespace as the Prometheus
// object, which shall be mounted into the Prometheus Pods.
// The Secrets are mounted into /etc/prometheus/secrets/<secret-name>.
// Secrets changes after initial creation of a Prometheus object are not
// reflected in the running Pods. To change the secrets mounted into the
// Prometheus Pods, the object must be deleted and recreated with the new list
// of secrets.
Secrets []string `json:"secrets,omitempty"`
// If specified, the pod's scheduling constraints.
Affinity *v1.Affinity `json:"affinity,omitempty"`
// If specified, the pod's tolerations.
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
// If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way.
RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"`
// If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way.
RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"`
// SecurityContext holds pod-level security attributes and common container settings.
// This defaults to non root user with uid 1000 and gid 2000 for Prometheus >v2.0 and
// default PodSecurityContext for other versions.
SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
// ListenLocal makes the Prometheus server listen on loopback, so that it
// does not bind against the Pod IP.
ListenLocal bool `json:"listenLocal,omitempty"`
// Containers allows injecting additional containers. This is meant to
// allow adding an authentication proxy to a Prometheus pod.
Containers []v1.Container `json:"containers,omitempty"`
// AdditionalScrapeConfigs allows specifying a key of a Secret containing
// additional Prometheus scrape configurations. Scrape configurations
// specified are appended to the configurations generated by the Prometheus
// Operator. Job configurations specified must have the form as specified
// in the official Prometheus documentation:
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>.
// As scrape configs are appended, the user is responsible to make sure it
// is valid. Note that using this feature may expose the possibility to
// break upgrades of Prometheus. It is advised to review Prometheus release
// notes to ensure that no incompatible scrape configs are going to break
// Prometheus after the upgrade.
AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"`
// AdditionalAlertManagerConfigs allows specifying a key of a Secret containing
// additional Prometheus AlertManager configurations. AlertManager configurations
// specified are appended to the configurations generated by the Prometheus
// Operator. Job configurations specified must have the form as specified
// in the official Prometheus documentation:
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<alertmanager_config>.
// As AlertManager configs are appended, the user is responsible to make sure it
// is valid. Note that using this feature may expose the possibility to
// break upgrades of Prometheus. It is advised to review Prometheus release
// notes to ensure that no incompatible AlertManager configs are going to break
// Prometheus after the upgrade.
AdditionalAlertManagerConfigs *v1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"`
// APIServerConfig allows specifying a host and auth methods to access apiserver.
// If left empty, Prometheus is assumed to run inside of the cluster
// and will discover API servers automatically and use the pod's CA certificate
// and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.
APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"`
// Thanos configuration allows configuring various aspects of a Prometheus
// server in a Thanos environment.
//
// This section is experimental, it may change significantly without
// deprecation notice in any release.
//
// This is experimental and may change significantly without backward
// compatibility in any release.
Thanos *ThanosSpec `json:"thanos,omitempty"`
}
// PrometheusStatus is the most recent observed status of the Prometheus cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type PrometheusStatus struct {
// Represents whether any actions on the underlying managed objects are
// being performed. Only delete actions will be performed.
Paused bool `json:"paused"`
// Total number of non-terminated pods targeted by this Prometheus deployment
// (their labels match the selector).
Replicas int32 `json:"replicas"`
// Total number of non-terminated pods targeted by this Prometheus deployment
// that have the desired version spec.
UpdatedReplicas int32 `json:"updatedReplicas"`
// Total number of available pods (ready for at least minReadySeconds)
// targeted by this Prometheus deployment.
AvailableReplicas int32 `json:"availableReplicas"`
// Total number of unavailable pods targeted by this Prometheus deployment.
UnavailableReplicas int32 `json:"unavailableReplicas"`
}
// AlertingSpec defines parameters for alerting configuration of Prometheus servers.
// +k8s:openapi-gen=true
type AlertingSpec struct {
// AlertmanagerEndpoints Prometheus should fire alerts against.
Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"`
}
// StorageSpec defines the configured storage for a group Prometheus servers.
// If neither `emptyDir` nor `volumeClaimTemplate` is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used.
// +k8s:openapi-gen=true
type StorageSpec struct {
// Name of the StorageClass to use when requesting storage provisioning. More
// info: https://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses
// (DEPRECATED - instead use `volumeClaimTemplate.spec.storageClassName`)
Class string `json:"class,omitempty"`
// EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More
// info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir
EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"`
// A label query over volumes to consider for binding.
// (DEPRECATED - instead use `volumeClaimTemplate.spec.selector`)
Selector *metav1.LabelSelector `json:"selector,omitempty"`
// Resources represents the minimum resources the volume should have. More
// info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources
// (DEPRECATED - instead use `volumeClaimTemplate.spec.resources`)
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// A PVC spec to be used by the Prometheus StatefulSets.
VolumeClaimTemplate v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
}
// ThanosSpec defines parameters for a Prometheus server within a Thanos deployment.
// +k8s:openapi-gen=true
type ThanosSpec struct {
// Peers is a DNS name for Thanos to discover peers through.
Peers *string `json:"peers,omitempty"`
// Version describes the version of Thanos to use.
Version *string `json:"version,omitempty"`
// Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`.
Tag *string `json:"tag,omitempty"`
// Thanos base image if other than default.
BaseImage *string `json:"baseImage,omitempty"`
// Resources defines the resource requirements for the Thanos sidecar.
// If not provided, no requests/limits will be set
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// GCS configures use of GCS in Thanos.
GCS *ThanosGCSSpec `json:"gcs,omitempty"`
// S3 configures use of S3 in Thanos.
S3 *ThanosS3Spec `json:"s3,omitempty"`
}
// ThanosGCSSpec defines parameters for use of Google Cloud Storage (GCS) with
// Thanos.
// +k8s:openapi-gen=true
type ThanosGCSSpec struct {
// Google Cloud Storage bucket name for stored blocks. If empty it won't
// store any block inside Google Cloud Storage.
Bucket *string `json:"bucket,omitempty"`
// Secret to access our Bucket.
SecretKey *v1.SecretKeySelector `json:"credentials,omitempty"`
}
// ThanosS3Spec defines parameters for of AWS Simple Storage Service (S3) with
// Thanos. (S3 compatible services apply as well)
// +k8s:openapi-gen=true
type ThanosS3Spec struct {
// S3-Compatible API bucket name for stored blocks.
Bucket *string `json:"bucket,omitempty"`
// S3-Compatible API endpoint for stored blocks.
Endpoint *string `json:"endpoint,omitempty"`
// AccessKey for an S3-Compatible API.
AccessKey *v1.SecretKeySelector `json:"accessKey,omitempty"`
// SecretKey for an S3-Compatible API.
SecretKey *v1.SecretKeySelector `json:"secretKey,omitempty"`
// Whether to use an insecure connection with an S3-Compatible API.
Insecure *bool `json:"insecure,omitempty"`
// Whether to use S3 Signature Version 2; otherwise Signature Version 4 will be used.
SignatureVersion2 *bool `json:"signatureVersion2,omitempty"`
// Whether to use Server Side Encryption
EncryptSSE *bool `json:"encryptsse,omitempty"`
}
// RemoteWriteSpec defines the remote_write configuration for prometheus.
// +k8s:openapi-gen=true
type RemoteWriteSpec struct {
//The URL of the endpoint to send samples to.
URL string `json:"url"`
//Timeout for requests to the remote write endpoint.
RemoteTimeout string `json:"remoteTimeout,omitempty"`
//The list of remote write relabel configurations.
WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"`
//BasicAuth for the URL.
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// File to read bearer token for remote write.
BearerToken string `json:"bearerToken,omitempty"`
// File to read bearer token for remote write.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// TLS Config to use for remote write.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
//Optional ProxyURL
ProxyURL string `json:"proxyUrl,omitempty"`
// QueueConfig allows tuning of the remote write queue parameters.
QueueConfig *QueueConfig `json:"queueConfig,omitempty"`
}
// QueueConfig allows the tuning of remote_write queue_config parameters. This object
// is referenced in the RemoteWriteSpec object.
// +k8s:openapi-gen=true
type QueueConfig struct {
// Capacity is the number of samples to buffer per shard before we start dropping them.
Capacity int `json:"capacity,omitempty"`
// MaxShards is the maximum number of shards, i.e. amount of concurrency.
MaxShards int `json:"maxShards,omitempty"`
// MaxSamplesPerSend is the maximum number of samples per send.
MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"`
// BatchSendDeadline is the maximum time a sample will wait in buffer.
BatchSendDeadline string `json:"batchSendDeadline,omitempty"`
// MaxRetries is the maximum number of times to retry a batch on recoverable errors.
MaxRetries int `json:"maxRetries,omitempty"`
// MinBackoff is the initial retry delay. Gets doubled for every retry.
MinBackoff string `json:"minBackoff,omitempty"`
// MaxBackoff is the maximum retry delay.
MaxBackoff string `json:"maxBackoff,omitempty"`
}
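// Illustrative only and not part of the upstream file: a minimal sketch of how
// the QueueConfig above might be populated before being attached to a
// RemoteWriteSpec. The field values are arbitrary example numbers, not
// recommended defaults.
var exampleRemoteWriteQueue = QueueConfig{
Capacity: 10000,
MaxShards: 30,
MaxSamplesPerSend: 500,
BatchSendDeadline: "5s",
MinBackoff: "30ms",
MaxBackoff: "100ms",
}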
// RemoteReadSpec defines the remote_read configuration for prometheus.
// +k8s:openapi-gen=true
type RemoteReadSpec struct {
//The URL of the endpoint to send samples to.
URL string `json:"url"`
//An optional list of equality matchers which have to be present
// in a selector to query the remote read endpoint.
RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"`
//Timeout for requests to the remote read endpoint.
RemoteTimeout string `json:"remoteTimeout,omitempty"`
//Whether reads should be made for queries for time ranges that
// the local storage should have complete data for.
ReadRecent bool `json:"readRecent,omitempty"`
//BasicAuth for the URL.
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// bearer token for remote read.
BearerToken string `json:"bearerToken,omitempty"`
// File to read bearer token for remote read.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// TLS Config to use for remote read.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
//Optional ProxyURL
ProxyURL string `json:"proxyUrl,omitempty"`
}
// RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion.
// It defines `<metric_relabel_configs>`-section of Prometheus configuration.
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
// +k8s:openapi-gen=true
type RelabelConfig struct {
//The source labels select values from existing labels. Their content is concatenated
//using the configured separator and matched against the configured regular expression
//for the replace, keep, and drop actions.
SourceLabels []string `json:"sourceLabels,omitempty"`
//Separator placed between concatenated source label values. default is ';'.
Separator string `json:"separator,omitempty"`
//Label to which the resulting value is written in a replace action.
//It is mandatory for replace actions. Regex capture groups are available.
TargetLabel string `json:"targetLabel,omitempty"`
//Regular expression against which the extracted value is matched. default is '(.*)'
Regex string `json:"regex,omitempty"`
// Modulus to take of the hash of the source label values.
Modulus uint64 `json:"modulus,omitempty"`
//Replacement value against which a regex replace is performed if the
//regular expression matches. Regex capture groups are available. Default is '$1'
Replacement string `json:"replacement,omitempty"`
// Action to perform based on regex matching. Default is 'replace'
Action string `json:"action,omitempty"`
}
// APIServerConfig defines a host and auth methods to access apiserver.
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config
// +k8s:openapi-gen=true
type APIServerConfig struct {
// Host of apiserver.
// A valid string consisting of a hostname or IP followed by an optional port number
Host string `json:"host"`
// BasicAuth allow an endpoint to authenticate over basic authentication
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// Bearer token for accessing apiserver.
BearerToken string `json:"bearerToken,omitempty"`
// File to read bearer token for accessing apiserver.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// TLS Config to use for accessing apiserver.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
}
// AlertmanagerEndpoints defines a selection of a single Endpoints object
// containing alertmanager IPs to fire alerts against.
// +k8s:openapi-gen=true
type AlertmanagerEndpoints struct {
// Namespace of Endpoints object.
Namespace string `json:"namespace"`
// Name of Endpoints object in Namespace.
Name string `json:"name"`
// Port the Alertmanager API is exposed on.
Port intstr.IntOrString `json:"port"`
// Scheme to use when firing alerts.
Scheme string `json:"scheme,omitempty"`
// Prefix for the HTTP path alerts are pushed to.
PathPrefix string `json:"pathPrefix,omitempty"`
// TLS Config to use for alertmanager connection.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
// BearerTokenFile to read from filesystem to use when authenticating to
// Alertmanager.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
}
// ServiceMonitor defines monitoring for a set of services.
// +k8s:openapi-gen=true
type ServiceMonitor struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// +k8s:openapi-gen=false
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of desired Service selection for target discovery by
// Prometheus.
Spec ServiceMonitorSpec `json:"spec"`
}
// ServiceMonitorSpec contains specification parameters for a ServiceMonitor.
// +k8s:openapi-gen=true
type ServiceMonitorSpec struct {
// The label to use to retrieve the job name from.
JobLabel string `json:"jobLabel,omitempty"`
// TargetLabels transfers labels on the Kubernetes Service onto the target.
TargetLabels []string `json:"targetLabels,omitempty"`
// A list of endpoints allowed as part of this ServiceMonitor.
Endpoints []Endpoint `json:"endpoints"`
// Selector to select Endpoints objects.
Selector metav1.LabelSelector `json:"selector"`
// Selector to select which namespaces the Endpoints objects are discovered from.
NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"`
}
// Endpoint defines a scrapeable endpoint serving Prometheus metrics.
// +k8s:openapi-gen=true
type Endpoint struct {
// Name of the service port this endpoint refers to. Mutually exclusive with targetPort.
Port string `json:"port,omitempty"`
// Name or number of the target port of the endpoint. Mutually exclusive with port.
TargetPort intstr.IntOrString `json:"targetPort,omitempty"`
// HTTP path to scrape for metrics.
Path string `json:"path,omitempty"`
// HTTP scheme to use for scraping.
Scheme string `json:"scheme,omitempty"`
// Optional HTTP URL parameters
Params map[string][]string `json:"params,omitempty"`
// Interval at which metrics should be scraped
Interval string `json:"interval,omitempty"`
// Timeout after which the scrape is ended
ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
// TLS configuration to use when scraping the endpoint
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
// File to read bearer token for scraping targets.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// HonorLabels chooses the metric's labels on collisions with target labels.
HonorLabels bool `json:"honorLabels,omitempty"`
// BasicAuth allow an endpoint to authenticate over basic authentication
// More info: https://prometheus.io/docs/operating/configuration/#endpoints
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// MetricRelabelConfigs to apply to samples before ingestion.
MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"`
// ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint.
ProxyURL *string `json:"proxyUrl,omitempty"`
}
// BasicAuth allow an endpoint to authenticate over basic authentication
// More info: https://prometheus.io/docs/operating/configuration/#endpoints
// +k8s:openapi-gen=true
type BasicAuth struct {
// The secret that contains the username for authentication
Username v1.SecretKeySelector `json:"username,omitempty"`
// The secret that contains the password for authentication
Password v1.SecretKeySelector `json:"password,omitempty"`
}
// TLSConfig specifies TLS configuration parameters.
// +k8s:openapi-gen=true
type TLSConfig struct {
// The CA cert to use for the targets.
CAFile string `json:"caFile,omitempty"`
// The client cert file for the targets.
CertFile string `json:"certFile,omitempty"`
// The client key file for the targets.
KeyFile string `json:"keyFile,omitempty"`
// Used to verify the hostname for the targets.
ServerName string `json:"serverName,omitempty"`
// Disable target certificate validation.
InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
}
// ServiceMonitorList is a list of ServiceMonitors.
// +k8s:openapi-gen=true
type ServiceMonitorList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of ServiceMonitors
Items []*ServiceMonitor `json:"items"`
}
// PrometheusRuleList is a list of PrometheusRules.
// +k8s:openapi-gen=true
type PrometheusRuleList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of Rules
Items []*PrometheusRule `json:"items"`
}
// PrometheusRule defines alerting rules for a Prometheus instance
// +k8s:openapi-gen=true
type PrometheusRule struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of desired alerting rule definitions for Prometheus.
Spec PrometheusRuleSpec `json:"spec"`
}
// PrometheusRuleSpec contains specification parameters for a Rule.
// +k8s:openapi-gen=true
type PrometheusRuleSpec struct {
// Content of Prometheus rule file
Groups []RuleGroup `json:"groups,omitempty"`
}
// RuleGroup and Rule are copied instead of vendored because the
// upstream Prometheus struct definitions don't have json struct tags.
// RuleGroup is a list of sequentially evaluated recording and alerting rules.
// +k8s:openapi-gen=true
type RuleGroup struct {
Name string `json:"name"`
Interval string `json:"interval,omitempty"`
Rules []Rule `json:"rules"`
}
// Rule describes an alerting or recording rule.
// +k8s:openapi-gen=true
type Rule struct {
Record string `json:"record,omitempty"`
Alert string `json:"alert,omitempty"`
Expr intstr.IntOrString `json:"expr"`
For string `json:"for,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
}
// Alertmanager describes an Alertmanager cluster.
// +k8s:openapi-gen=true
type Alertmanager struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// +k8s:openapi-gen=false
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of the Alertmanager cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Spec AlertmanagerSpec `json:"spec"`
// Most recent observed status of the Alertmanager cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Status *AlertmanagerStatus `json:"status,omitempty"`
}
// AlertmanagerSpec is a specification of the desired behavior of the Alertmanager cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type AlertmanagerSpec struct {
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// Metadata Labels and Annotations gets propagated to the prometheus pods.
PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"`
// Version the cluster should be on.
Version string `json:"version,omitempty"`
// Tag of Alertmanager container image to be deployed. Defaults to the value of `version`.
Tag string `json:"tag,omitempty"`
// Base image that is used to deploy pods, without tag.
BaseImage string `json:"baseImage,omitempty"`
// An optional list of references to secrets in the same namespace
// to use for pulling prometheus and alertmanager images from registries
// see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Secrets is a list of Secrets in the same namespace as the Alertmanager
// object, which shall be mounted into the Alertmanager Pods.
// The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>.
Secrets []string `json:"secrets,omitempty"`
// Log level for Alertmanager to be configured with.
LogLevel string `json:"logLevel,omitempty"`
// Size is the expected size of the alertmanager cluster. The controller will
// eventually make the size of the running cluster equal to the expected
// size.
Replicas *int32 `json:"replicas,omitempty"`
// Time duration Alertmanager shall retain data for. Default is '120h',
// and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years).
Retention string `json:"retention,omitempty"`
// Storage is the definition of how storage will be used by the Alertmanager
// instances.
Storage *StorageSpec `json:"storage,omitempty"`
// The external URL the Alertmanager instances will be available under. This is
// necessary to generate correct URLs. This is necessary if Alertmanager is not
// served from root of a DNS name.
ExternalURL string `json:"externalUrl,omitempty"`
// The route prefix Alertmanager registers HTTP handlers for. This is useful,
// if using ExternalURL and a proxy is rewriting HTTP routes of a request,
// and the actual ExternalURL is still true, but the server serves requests
// under a different route prefix. For example for use with `kubectl proxy`.
RoutePrefix string `json:"routePrefix,omitempty"`
// If set to true all actions on the underlying managed objects are not
// going to be performed, except for delete actions.
Paused bool `json:"paused,omitempty"`
// Define which Nodes the Pods are scheduled on.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Define resources requests and limits for single Pods.
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// If specified, the pod's scheduling constraints.
Affinity *v1.Affinity `json:"affinity,omitempty"`
// If specified, the pod's tolerations.
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
// SecurityContext holds pod-level security attributes and common container settings.
// This defaults to non root user with uid 1000 and gid 2000.
SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
// ServiceAccountName is the name of the ServiceAccount to use to run the
// Prometheus Pods.
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// ListenLocal makes the Alertmanager server listen on loopback, so that it
// does not bind against the Pod IP. Note this is only for the Alertmanager
// UI, not the gossip communication.
ListenLocal bool `json:"listenLocal,omitempty"`
// Containers allows injecting additional containers. This is meant to
// allow adding an authentication proxy to an Alertmanager pod.
Containers []v1.Container `json:"containers,omitempty"`
}
// AlertmanagerList is a list of Alertmanagers.
// +k8s:openapi-gen=true
type AlertmanagerList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of Alertmanagers
Items []Alertmanager `json:"items"`
}
// AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type AlertmanagerStatus struct {
// Represents whether any actions on the underlying managed objects are
// being performed. Only delete actions will be performed.
Paused bool `json:"paused"`
// Total number of non-terminated pods targeted by this Alertmanager
// cluster (their labels match the selector).
Replicas int32 `json:"replicas"`
// Total number of non-terminated pods targeted by this Alertmanager
// cluster that have the desired version spec.
UpdatedReplicas int32 `json:"updatedReplicas"`
// Total number of available pods (ready for at least minReadySeconds)
// targeted by this Alertmanager cluster.
AvailableReplicas int32 `json:"availableReplicas"`
// Total number of unavailable pods targeted by this Alertmanager cluster.
UnavailableReplicas int32 `json:"unavailableReplicas"`
}
// NamespaceSelector is a selector for selecting either all namespaces or a
// list of namespaces.
// +k8s:openapi-gen=true
type NamespaceSelector struct {
// Boolean describing whether all namespaces are selected in contrast to a
// list restricting them.
Any bool `json:"any,omitempty"`
// List of namespace names.
MatchNames []string `json:"matchNames,omitempty"`
// TODO(fabxc): this should embed metav1.LabelSelector eventually.
// Currently the selector is only used for namespaces which require more complex
// implementation to support label selections.
}
// DeepCopyObject implements the runtime.Object interface.
func (l *Alertmanager) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *AlertmanagerList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *Prometheus) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *PrometheusList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *ServiceMonitor) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *ServiceMonitorList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (f *PrometheusRule) DeepCopyObject() runtime.Object {
return f.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *PrometheusRuleList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
| 1 | 11,334 | nit: this should be `SHA` since it is an acronym for `Secure Hash Algorithm`, no? | prometheus-operator-prometheus-operator | go |
@@ -27,6 +27,13 @@ import jstz from 'jstz'
* @returns {String} Current timezone of user
*/
export default () => {
+ if (window.Intl && typeof window.Intl === 'object') {
+ const { timeZone } = Intl.DateTimeFormat().resolvedOptions()
+ if (timeZone) {
+ return timeZone
+ }
+ }
+
const determinedTimezone = jstz.determine()
if (!determinedTimezone) {
return 'UTC' | 1 | /**
* @copyright Copyright (c) 2019 Georg Ehrke
*
* @author Georg Ehrke <[email protected]>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
import jstz from 'jstz'
/**
* Returns the current timezone of the user
*
* @returns {String} Current timezone of user
*/
export default () => {
const determinedTimezone = jstz.determine()
if (!determinedTimezone) {
return 'UTC'
}
const timezoneName = determinedTimezone.name()
if (!timezoneName) {
return 'UTC'
}
return timezoneName
}
| 1 | 7,002 | Not sure if Intl can be defined but not `Intl.DateTimeFormat`. Both seem to have the same percentage on CanIUse. | nextcloud-calendar | js |
@@ -121,7 +121,7 @@ func (p *Provisioner) Provision(opts pvController.VolumeOptions) (*v1.Persistent
if reqMap != nil {
size = pvc.Spec.Resources.Requests["storage"]
}
- sendEventOrIgnore(name, size.String(), stgType, analytics.VolumeProvision)
+ sendEventOrIgnore(pvc.Name, name, size.String(), stgType, analytics.VolumeProvision)
if stgType == "hostpath" {
return p.ProvisionHostPath(opts, pvCASConfig)
} | 1 | /*
Copyright 2019 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file contains the volume creation and deletion handlers invoked by
the github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller.
The handlers that are mandatory to be implemented:
- Provision - is called by controller to perform custom validation on the PVC
request and return a valid PV spec. The controller will create the PV object
using the spec passed to it and bind it to the PVC.
- Delete - is called by controller to perform cleanup tasks on the PV before
deleting it.
*/
package app
import (
"fmt"
"github.com/openebs/maya/pkg/alertlog"
"strings"
"github.com/pkg/errors"
"k8s.io/klog"
pvController "sigs.k8s.io/sig-storage-lib-external-provisioner/controller"
//pvController "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller"
mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
menv "github.com/openebs/maya/pkg/env/v1alpha1"
analytics "github.com/openebs/maya/pkg/usage"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
//metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
)
// NewProvisioner will create a new Provisioner object and initialize
// it with global information used across PV create and delete operations.
func NewProvisioner(stopCh chan struct{}, kubeClient *clientset.Clientset) (*Provisioner, error) {
namespace := getOpenEBSNamespace() //menv.Get(menv.OpenEBSNamespace)
if len(strings.TrimSpace(namespace)) == 0 {
return nil, fmt.Errorf("Cannot start Provisioner: failed to get namespace")
}
p := &Provisioner{
stopCh: stopCh,
kubeClient: kubeClient,
namespace: namespace,
helperImage: getDefaultHelperImage(),
defaultConfig: []mconfig.Config{
{
Name: KeyPVBasePath,
Value: getDefaultBasePath(),
},
},
}
p.getVolumeConfig = p.GetVolumeConfig
return p, nil
}
// SupportsBlock will be used by controller to determine if block mode is
// supported by the host path provisioner.
func (p *Provisioner) SupportsBlock() bool {
return true
}
// Provision is invoked by the PVC controller which expect the PV
// to be provisioned and a valid PV spec returned.
func (p *Provisioner) Provision(opts pvController.VolumeOptions) (*v1.PersistentVolume, error) {
pvc := opts.PVC
if pvc.Spec.Selector != nil {
return nil, fmt.Errorf("claim.Spec.Selector is not supported")
}
for _, accessMode := range pvc.Spec.AccessModes {
if accessMode != v1.ReadWriteOnce {
return nil, fmt.Errorf("Only support ReadWriteOnce access mode")
}
}
if opts.SelectedNode == nil {
return nil, fmt.Errorf("configuration error, no node was specified")
}
if GetNodeHostname(opts.SelectedNode) == "" {
return nil, fmt.Errorf("configuration error, node{%v} hostname is empty", opts.SelectedNode.Name)
}
name := opts.PVName
// Create a new Config instance for the PV by merging the
// default configuration with configuration provided
// via PVC and the associated StorageClass
pvCASConfig, err := p.getVolumeConfig(name, pvc)
if err != nil {
return nil, err
}
//TODO: Determine if hostpath or device based Local PV should be created
stgType := pvCASConfig.GetStorageType()
size := resource.Quantity{}
reqMap := pvc.Spec.Resources.Requests
if reqMap != nil {
size = pvc.Spec.Resources.Requests["storage"]
}
sendEventOrIgnore(name, size.String(), stgType, analytics.VolumeProvision)
if stgType == "hostpath" {
return p.ProvisionHostPath(opts, pvCASConfig)
}
if stgType == "device" {
return p.ProvisionBlockDevice(opts, pvCASConfig)
}
if *opts.PVC.Spec.VolumeMode == v1.PersistentVolumeBlock && stgType != "device" {
return nil, fmt.Errorf("PV with BlockMode is not supported with StorageType %v", stgType)
}
alertlog.Logger.Errorw("",
"eventcode", "local.pv.provision.failure",
"msg", "Failed to provision Local PV",
"rname", opts.PVName,
"reason", "StorageType not supported",
"storagetype", stgType,
)
return nil, fmt.Errorf("PV with StorageType %v is not supported", stgType)
}
// Delete is invoked by the PVC controller to perform clean-up
// activities before deleting the PV object. If reclaim policy is
// set to not-retain, then this function will create a helper pod
// to delete the host path from the node.
func (p *Provisioner) Delete(pv *v1.PersistentVolume) (err error) {
defer func() {
err = errors.Wrapf(err, "failed to delete volume %v", pv.Name)
}()
//Initiate clean up only when reclaim policy is not retain.
if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain {
//TODO: Determine the type of PV
pvType := GetLocalPVType(pv)
size := resource.Quantity{}
reqMap := pv.Spec.Capacity
if reqMap != nil {
size = pv.Spec.Capacity["storage"]
}
sendEventOrIgnore(pv.Name, size.String(), pvType, analytics.VolumeDeprovision)
if pvType == "local-device" {
err := p.DeleteBlockDevice(pv)
if err != nil {
alertlog.Logger.Errorw("",
"eventcode", "local.pv.delete.failure",
"msg", "Failed to delete Local PV",
"rname", pv.Name,
"reason", "failed to delete block device",
"storagetype", pvType,
)
}
return err
}
err = p.DeleteHostPath(pv)
if err != nil {
alertlog.Logger.Errorw("",
"eventcode", "local.pv.delete.failure",
"msg", "Failed to delete Local PV",
"rname", pv.Name,
"reason", "failed to delete host path",
"storagetype", pvType,
)
}
return err
}
klog.Infof("Retained volume %v", pv.Name)
alertlog.Logger.Infow("",
"eventcode", "local.pv.delete.success",
"msg", "Successfully deleted Local PV",
"rname", pv.Name,
)
return nil
}
// sendEventOrIgnore sends anonymous local-pv provision/delete events
func sendEventOrIgnore(pvName, capacity, stgType, method string) {
if method == analytics.VolumeProvision {
stgType = "local-" + stgType
}
if menv.Truthy(menv.OpenEBSEnableAnalytics) {
analytics.New().Build().ApplicationBuilder().
SetVolumeType(stgType, method).
SetDocumentTitle(pvName).
SetLabel(analytics.EventLabelCapacity).
SetReplicaCount(analytics.LocalPVReplicaCount, method).
SetCategory(method).
SetVolumeCapacity(capacity).Send()
}
}
| 1 | 18,532 | PTAL, looks like the order of PVC & PV is reversed for different storage-engine types, for Jiva/CStor the args sent to sendEventOrIgnore are `PV, PVCName`. | openebs-maya | go |
@@ -65,7 +65,7 @@ import vn.mbm.phimp.me.utils.RSSPhotoItem_Personal;
@SuppressWarnings("deprecation")
public class PhimpMe extends AppCompatActivity implements BottomNavigationView.OnNavigationItemSelectedListener //, android.view.GestureDetector.OnGestureListener
{
- public static Context ctx;
+ public static Context ctx ;
public static File DataDirectory;
public static final String PREFS_NAME = "PhimpMePrefs";
public static final String DATABASE_NAME = "PhimpMe"; | 1 | package vn.mbm.phimp.me;
import android.Manifest;
import android.app.AlertDialog;
import android.app.ProgressDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.pm.ActivityInfo;
import android.database.Cursor;
import android.gesture.GestureOverlayView;
import android.graphics.Bitmap;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Bundle;
import android.provider.MediaStore;
import android.support.annotation.NonNull;
import android.support.design.widget.BottomNavigationView;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentStatePagerAdapter;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.view.Display;
import android.view.KeyEvent;
import android.view.MenuItem;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import android.view.animation.AccelerateInterpolator;
import android.view.animation.Animation;
import android.view.animation.TranslateAnimation;
import android.widget.LinearLayout;
import android.widget.TabHost.TabSpec;
import android.widget.Toast;
import com.paypal.android.MEP.PayPal;
import com.vistrav.ask.Ask;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.HashMap;
import vn.mbm.phimp.me.database.AccountDBAdapter;
import vn.mbm.phimp.me.database.TumblrDBAdapter;
import vn.mbm.phimp.me.gallery.PhimpMeGallery;
import vn.mbm.phimp.me.utils.Commons;
import vn.mbm.phimp.me.utils.RSSPhotoItem;
import vn.mbm.phimp.me.utils.RSSPhotoItem_Personal;
//import android.widget.ImageView;
//import android.widget.TabHost;
//import android.widget.TabHost.OnTabChangeListener;
//import android.widget.TextView;
//import com.google.android.gms.ads.AdRequest;
//import com.google.android.maps.GeoPoint;
//
//@ReportsCrashes(formKey = "dFRsUzBJSWFKUFc3WmFjaXZab2V0dHc6MQ",
// mode = ReportingInteractionMode.TOAST,
// forceCloseDialogAfterToast = false,
// resToastText = R.string.crash_report_text)
@SuppressWarnings("deprecation")
public class PhimpMe extends AppCompatActivity implements BottomNavigationView.OnNavigationItemSelectedListener //, android.view.GestureDetector.OnGestureListener
{
public static Context ctx;
public static File DataDirectory;
public static final String PREFS_NAME = "PhimpMePrefs";
public static final String DATABASE_NAME = "PhimpMe";
private static final String DATA_DIRECTORY_NAME = "phimp.me";
public static int MAX_DISPLAY_PHOTOS;
public static int MAX_FILESIZE_DOWNLOAD;
/* Hon Nguyen */
public static String phimp_me_tmp;
public static Uri phimp_me_img_uri_temporary;
public static boolean FEEDS_GOOGLE_ADMOB;
public static boolean FEEDS_LIST_YAHOO_NEWS;
public static final String FEEDS_LIST_YAHOO_NEWS_TAG = "feeds_list_yahoo_news";
public static boolean FEEDS_LOCAL_GALLERY;
public static final String FEEDS_LOCAL_GALLERY_TAG = "feeds_local_gallery";
public static int check = 0;
public static boolean FEEDS_LIST_FLICKR_PUBLIC;
public static final String FEEDS_LIST_FLICKR_PUBLIC_TAG = "feeds_list_flickr_public";
public static boolean FEEDS_LIST_FLICKR_RECENT;
public static final String FEEDS_LIST_FLICKR_RECENT_TAG = "feeds_list_flickr_recent";
public static boolean FEEDS_LIST_FLICKR_PRIVATE;
public static final String FEEDS_LIST_FLICKR_PRIVATE_TAG = "feeds_list_flickr_private";
public static boolean FEEDS_LIST_GOOGLE_PICASA_PUBLIC;
public static final String FEEDS_LIST_GOOGLE_PICASA_PUBLIC_TAG = "feeds_list_google_picasa_public";
public static boolean FEEDS_LIST_GOOGLE_NEWS;
public static final String FEEDS_LIST_GOOGLE_NEWS_TAG = "feeds_list_google_news";
public static boolean FEEDS_LIST_GOOGLE_PICASA_PRIVATE;
public static final String FEEDS_LIST_GOOGLE_PICASA_PRIVATE_TAG = "feeds_list_google_picasa_private";
public static boolean FEEDS_LIST_DEVIANTART_PUBLIC;
public static final String FEEDS_LIST_DEVIANTART_PUBLIC_TAG = "feeds_list_deviantart_public";
public static boolean FEEDS_LIST_DEVIANTART_PRIVITE;
public static final String FEEDS_LIST_DEVIANTART_PRIVITE_TAG = "feeds_list_deviantart_privite";
public static boolean FEEDS_LIST_IMAGESHACK_PRIVITE;
public static final String FEEDS_LIST_IMAGESHACK_PRIVITE_TAG = "feeds_list_imageshack_privite";
public static boolean FEEDS_LIST_VK;
public static final String FEEDS_LIST_VK_TAG = "feeds_list_vk";
public static boolean FEEDS_LIST_FACEBOOK_PRIVATE;
public static final String FEEDS_LIST_FACEBOOK_PRIVATE_TAG = "feeds_list_facebook_private";
public static boolean FEEDS_LIST_TUMBLR_PRIVATE;
public static final String FEEDS_LIST_TUMBLR_PRIVATE_TAG = "feeds_list_tumblr_private";
public static boolean FEEDS_LIST_TWITTER_PRIVATE;
public static final String FEEDS_LIST_TWITTER_PRIVATE_TAG = "feeds_list_twitter_private";
public static boolean FEEDS_LIST_KAIXIN_PRIVATE;
public static final String FEEDS_LIST_KAIXIN_PRIVATE_TAG = "feeds_list_twitter_private";
public static boolean FEEDS_LIST_IMGUR_PERSONAL;
public static final String FEEDS_LIST_IMGUR_PERSONAL_TAG = "feeds_list_imgur_personal";
public static boolean FEEDS_LIST_IMGUR_PUBLIC;
public static final String FEEDS_LIST_IMGUR_PUBLIC_TAG = "feeds_list_imgur_public";
public static boolean FEEDS_LIST_MYSERVICES;
public static boolean FEEDS_LIST_MYSERVICES1;
public static boolean FEEDS_LIST_MYSERVICES2;
public static boolean FEEDS_LIST_MYSERVICES3;
public static boolean FEEDS_LIST_MYSERVICES4;
public static boolean FEEDS_LIST_MYSERVICES5;
public static final String FEDDS_LIST_MYSERVICES_TAG = "feeds_list_myservices";
public static final String FEDDS_LIST_MYSERVICES_TAG1 = "feeds_list_myservices";
public static final String FEDDS_LIST_MYSERVICES_TAG2 = "feeds_list_myservices";
public static final String FEDDS_LIST_MYSERVICES_TAG3 = "feeds_list_myservices";
public static final String FEDDS_LIST_MYSERVICES_TAG4 = "feeds_list_myservices";
public static final String FEDDS_LIST_MYSERVICES_TAG5 = "feeds_list_myservices";
public static String MY_FEED_URL = "";
public static boolean FEEDS_LIST_500PX_PRIVATE;
public static final String FEEDS_LIST_500PX_PRIVATE_TAG = "feeds_list_500px_private";
public static boolean FEEDS_LIST_500PX_PUBLIC;
public static final String FEEDS_LIST_500PX_PUBLIC_TAG = "feeds_list_500px_public";
public static boolean FEEDS_LIST_SOHU_PERSONAL;
public static final String FEEDS_LIST_SOHU_PERSONAL_TAG = "feeds_list_sohu_personal";
public static boolean add_account_upload, add_account_setting;
public static HashMap<String, Boolean> checked_accounts = new HashMap<String, Boolean>();
public static Uri UploadPhotoPreview;
public static boolean addCurrentPin = false;
//public static GeoPoint currentGeoPoint;
public static Double curLatitude, curLongtitude;
public static Double UploadLatitude, UploadLongitude;
public static LinearLayout popupTabs; // !?!
public static int camera_use;
//LOCAL
public static ArrayList<String> filepath = new ArrayList<String>();
public static ArrayList<Integer> IdList;
public static int local_count = 1;
ProgressDialog progConfig;
//new Gallery
static ArrayList<ArrayList<RSSPhotoItem>> phimpme_array_list = new ArrayList<ArrayList<RSSPhotoItem>>();
static ArrayList<ArrayList<RSSPhotoItem_Personal>> phimpme_personal_array_list = new ArrayList<ArrayList<RSSPhotoItem_Personal>>();
//Cache
public static CacheStore cache;
public static CacheTask cachetask;
//Crash Report
public static String CRITTERCISM_APP_ID = "4fffa20fbe790e4bc7000002";
boolean serviceDisabled = false;
public static boolean check_cache;
public static boolean check_export = true;
public static BottomNavigationView mBottomNav;
public static boolean check_download = false;
public static boolean check_download_local_gallery = true;
public static int flashStatus = 2;
//Gallery
public static boolean gallery_delete = false;
//private GestureDetector gestureScanner;
//View.OnTouchListener gestureListener;
public static int width, height;
HomeScreenState currentScreen = HomeScreenState.GALLERY;
@SuppressWarnings("unused")
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
ctx = this;
Log.d("thong", "PhimpMe - onCreate()");
// The following line triggers the initialization of ACRA
//ACRA.init((Application) ctx.getApplicationContext());
//Init PayPal library
new Thread(new Runnable() {
@Override
public void run() {
initLibrary(ctx);
}
}).start();
camera_use = 0;
if (IdList == null) IdList = new ArrayList<Integer>();
Ask.on(this)
.forPermissions(Manifest.permission.ACCESS_FINE_LOCATION
, Manifest.permission.WRITE_EXTERNAL_STORAGE
, Manifest.permission.CAMERA,
Manifest.permission.READ_PHONE_STATE)
.go();
setContentView(R.layout.main);
setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);
//gestureScanner = new GestureDetector(this);
//Crash report
// Crittercism.init(getApplicationContext(), CRITTERCISM_APP_ID, serviceDisabled);
add_account_upload = false;
add_account_setting = false;
cache = CacheStore.getInstance();
cachetask = new CacheTask();
String[] str = null;
cachetask.execute(str);
/*
* get window width, height
*/
Display display = getWindowManager().getDefaultDisplay();
width = display.getWidth() / 3;
height = width;
File file0 = getBaseContext().getFileStreamPath("local_gallery.txt");
if (file0.exists()) {
try {
FileInputStream Rfile = openFileInput("local_gallery.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LOCAL_GALLERY = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file1 = getBaseContext().getFileStreamPath("flickr_public.txt");
if (file1.exists()) {
try {
FileInputStream Rfile = openFileInput("flickr_public.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_FLICKR_PUBLIC = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file2 = getBaseContext().getFileStreamPath("flickr_recent.txt");
if (file2.exists()) {
try {
FileInputStream Rfile = openFileInput("flickr_recent.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_FLICKR_RECENT = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file3 = getBaseContext().getFileStreamPath("google_news.txt");
if (file3.exists()) {
try {
FileInputStream Rfile = openFileInput("google_news.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_GOOGLE_NEWS = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file4 = getBaseContext().getFileStreamPath("public_picasa.txt");
if (file4.exists()) {
try {
FileInputStream Rfile = openFileInput("public_picasa.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_GOOGLE_PICASA_PUBLIC = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file5 = getBaseContext().getFileStreamPath("yahoo_news.txt");
if (file5.exists()) {
try {
FileInputStream Rfile = openFileInput("yahoo_news.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_YAHOO_NEWS = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file6 = getBaseContext().getFileStreamPath("deviant_public.txt");
if (file6.exists()) {
try {
FileInputStream Rfile = openFileInput("deviant_public.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_DEVIANTART_PUBLIC = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file7 = getBaseContext().getFileStreamPath("flick_private.txt");
if (file7.exists()) {
try {
FileInputStream Rfile = openFileInput("flick_private.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_FLICKR_PRIVATE = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file8 = getBaseContext().getFileStreamPath("picasa_private.txt");
if (file8.exists()) {
try {
FileInputStream Rfile = openFileInput("picasa_private.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_GOOGLE_PICASA_PRIVATE = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file9 = getBaseContext().getFileStreamPath("deviant_private.txt");
if (file9.exists()) {
try {
FileInputStream Rfile = openFileInput("deviant_private.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_DEVIANTART_PRIVITE = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file10 = getBaseContext().getFileStreamPath("vk.txt");
if (file10.exists()) {
try {
FileInputStream Rfile = openFileInput("vk.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_VK = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file11 = getBaseContext().getFileStreamPath("facebook.txt");
if (file11.exists()) {
try {
FileInputStream Rfile = openFileInput("facebook.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_FACEBOOK_PRIVATE = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file12 = getBaseContext().getFileStreamPath("tumblr_private.txt");
if (file12.exists()) {
try {
FileInputStream Rfile = openFileInput("tumblr_private.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_TUMBLR_PRIVATE = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file13 = getBaseContext().getFileStreamPath("imgur_personal.txt");
if (file13.exists()) {
try {
FileInputStream Rfile = openFileInput("imgur_personal.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_IMGUR_PERSONAL = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
File file14 = getBaseContext().getFileStreamPath("sohu_personal.txt");
if (file14.exists()) {
try {
FileInputStream Rfile = openFileInput("sohu_personal.txt");
InputStreamReader einputreader = new InputStreamReader(Rfile);
BufferedReader ebuffreader = new BufferedReader(einputreader);
Boolean tmp = Boolean.valueOf(ebuffreader.readLine());
PhimpMe.FEEDS_LIST_SOHU_PERSONAL = tmp;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
/*
* Export data
*/
if (check_export = true) {
//exportDevicesInfomation();
//exportInstalledPakage();
//Intent intent = new Intent(this,CollectUserData.class);
//startService(intent);
//check_export = false;
}
TabSpec ts; // !?
View tbview;
Intent intent;
/*
* Thong - Load preferences
*/
SharedPreferences settings = getSharedPreferences(PREFS_NAME, 0);
MAX_DISPLAY_PHOTOS = settings.getInt("gallery_max_display_photos", getResources().getInteger(R.integer.gallery_max_display_photos));
MAX_FILESIZE_DOWNLOAD = settings.getInt("max_filesize_download", getResources().getInteger(R.integer.max_filesize_download));
FEEDS_LOCAL_GALLERY = settings.getBoolean(FEEDS_LOCAL_GALLERY_TAG, true);
/*FEEDS_LIST_FLICKR_PUBLIC = settings.getBoolean(FEEDS_LIST_FLICKR_PUBLIC_TAG, false);
FEEDS_LIST_FLICKR_RECENT = settings.getBoolean(FEEDS_LIST_FLICKR_RECENT_TAG, false);
FEEDS_LIST_YAHOO_NEWS = settings.getBoolean(FEEDS_LIST_YAHOO_NEWS_TAG, false);
FEEDS_LIST_GOOGLE_PICASA_PUBLIC = settings.getBoolean(FEEDS_LIST_GOOGLE_PICASA_PUBLIC_TAG, false);
FEEDS_LIST_GOOGLE_NEWS = settings.getBoolean(FEEDS_LIST_GOOGLE_NEWS_TAG, false);
FEEDS_LIST_VK = settings.getBoolean(FEEDS_LIST_VK_TAG, false);
FEEDS_LIST_FACEBOOK_PRIVATE = settings.getBoolean(FEEDS_LIST_FACEBOOK_PRIVATE_TAG, false);
FEEDS_LIST_FLICKR_PRIVATE = settings.getBoolean(FEEDS_LIST_FLICKR_PRIVATE_TAG, false);
FEEDS_LIST_GOOGLE_PICASA_PRIVATE= settings.getBoolean(FEEDS_LIST_GOOGLE_PICASA_PRIVATE_TAG, false);
FEEDS_LIST_TUMBLR_PRIVATE= settings.getBoolean(FEEDS_LIST_TUMBLR_PRIVATE_TAG, false);
FEEDS_LIST_DEVIANTART_PRIVITE= settings.getBoolean(FEEDS_LIST_DEVIANTART_PRIVITE_TAG, false);
FEEDS_LIST_DEVIANTART_PUBLIC= settings.getBoolean(FEEDS_LIST_DEVIANTART_PUBLIC_TAG, false);
FEEDS_LIST_GOOGLE_PICASA_PRIVATE= settings.getBoolean(FEEDS_LIST_GOOGLE_PICASA_PRIVATE_TAG, false);
FEEDS_LIST_TWITTER_PRIVATE= settings.getBoolean(FEEDS_LIST_TWITTER_PRIVATE_TAG, false);
FEEDS_LIST_KAIXIN_PRIVATE= settings.getBoolean(FEEDS_LIST_KAIXIN_PRIVATE_TAG, false);
FEEDS_LIST_IMGUR_PERSONAL= settings.getBoolean(FEEDS_LIST_IMGUR_PERSONAL_TAG, false);
FEEDS_LIST_MYSERVICES= settings.getBoolean(FEDDS_LIST_MYSERVICES_TAG, false);
FEEDS_LIST_IMGUR_PUBLIC= settings.getBoolean(FEEDS_LIST_IMGUR_PUBLIC_TAG, false);
FEEDS_LIST_500PX_PRIVATE=settings.getBoolean(FEEDS_LIST_500PX_PRIVATE_TAG, false);
FEEDS_LIST_500PX_PUBLIC= settings.getBoolean(FEEDS_LIST_500PX_PUBLIC_TAG, false);
FEEDS_LIST_SOHU_PERSONAL= settings.getBoolean(FEEDS_LIST_SOHU_PERSONAL_TAG, false);
FEEDS_LIST_MYSERVICES1= settings.getBoolean(FEDDS_LIST_MYSERVICES_TAG1, false);
FEEDS_LIST_MYSERVICES2= settings.getBoolean(FEDDS_LIST_MYSERVICES_TAG2, false);
FEEDS_LIST_MYSERVICES3= settings.getBoolean(FEDDS_LIST_MYSERVICES_TAG3, false);
FEEDS_LIST_MYSERVICES4= settings.getBoolean(FEDDS_LIST_MYSERVICES_TAG4, false);
FEEDS_LIST_MYSERVICES5= settings.getBoolean(FEDDS_LIST_MYSERVICES_TAG5, false);*/
/*
* Thong - Get data directory
*/
try {
DataDirectory = new File(Commons.getDataDirectory(ctx).getAbsolutePath() + "/" + DATA_DIRECTORY_NAME);
if (!DataDirectory.exists()) {
if (!DataDirectory.mkdirs()) {
Commons.AlertLog(ctx, "Cannot create Data Directory " + DataDirectory.getAbsolutePath(), "OK").show();
} else {
}
} else {
}
} catch (Exception e) {
e.printStackTrace();
Toast.makeText(getApplicationContext(), "Error: " + e.toString(), Toast.LENGTH_LONG).show();
}
/*
* Thong - Database file init
*/
File folder = new File(DataDirectory + "/PhimpMe_Photo_Effect");
folder.mkdirs();
File folder_take_photo = new File(DataDirectory + "/take_photo");
folder_take_photo.mkdirs();
phimp_me_tmp = folder + "/tmp.jpg";
phimp_me_img_uri_temporary = Uri.fromFile(new File(phimp_me_tmp));
File database_file = getDatabasePath(DATABASE_NAME);
if (!database_file.exists()) {
AccountDBAdapter db = new AccountDBAdapter(ctx);
db.open();
db.close();
TumblrDBAdapter db2 = new TumblrDBAdapter(ctx);
db2.open();
db2.close();
/* Clear memory */
db = null;
db2 = null;
}
/*
* Thong - Initial Tab control
*/
try {
mBottomNav = (BottomNavigationView) findViewById(R.id.navigation_view);
} catch (Exception e) {
}
mBottomNav.setOnNavigationItemSelectedListener(this);
mBottomNav.getMenu().getItem(0).setChecked(true);
// Initialising fragment container
if (findViewById(R.id.fragment_container) != null) {
newGallery frag = new newGallery();
getSupportFragmentManager().beginTransaction()
.add(R.id.fragment_container, frag)
.commit();
}
}
@Override
public boolean onNavigationItemSelected(@NonNull MenuItem item) {
switch (item.getItemId()) {
case R.id.tab_gallery:
if (currentScreen != HomeScreenState.GALLERY) {
newGallery frag = new newGallery();
getSupportFragmentManager().beginTransaction()
.setCustomAnimations(R.anim.fragment_anim_fadein,R.anim.fragment_anim_fadeout)
.replace(R.id.fragment_container, frag)
.commit();
currentScreen = HomeScreenState.GALLERY;
}
break;
case R.id.tab_map:
if (currentScreen != HomeScreenState.MAP) {
MapFragment map = new MapFragment();
getSupportFragmentManager().beginTransaction()
.setCustomAnimations(R.anim.fragment_anim_fadein, R.anim.fragment_anim_fadeout)
.replace(R.id.fragment_container, map)
.commit();
currentScreen = HomeScreenState.MAP;
}
break;
case R.id.tab_camera:
if (currentScreen != HomeScreenState.CAMERA) {
Camera2 camFrag = new Camera2();
getSupportFragmentManager().beginTransaction()
.setCustomAnimations(R.anim.fragment_anim_fadein,R.anim.fragment_anim_fadeout)
.replace(R.id.fragment_container, camFrag)
.commit();
currentScreen = HomeScreenState.CAMERA;
}
break;
case R.id.tab_upload:
if (currentScreen != HomeScreenState.UPLOAD) {
Upload frag = new Upload();
getSupportFragmentManager().beginTransaction()
.setCustomAnimations(R.anim.fragment_anim_fadein,R.anim.fragment_anim_fadeout)
.replace(R.id.fragment_container, frag)
.commit();
currentScreen = HomeScreenState.UPLOAD;
}
break;
case R.id.tab_settings:
if (currentScreen != HomeScreenState.SETTINGS) {
Settings frag = new Settings();
getSupportFragmentManager().beginTransaction()
.setCustomAnimations(R.anim.fragment_anim_fadein,R.anim.fragment_anim_fadeout)
.replace(R.id.fragment_container, frag)
.commit();
currentScreen = HomeScreenState.SETTINGS;
}
break;
}
return true;
}
/* public Animation inFromRightAnimation() {
Animation inFromRight = new TranslateAnimation(
Animation.RELATIVE_TO_PARENT, +1.0f,
Animation.RELATIVE_TO_PARENT, 0.0f,
Animation.RELATIVE_TO_PARENT, 0.0f,
Animation.RELATIVE_TO_PARENT, 0.0f);
inFromRight.setDuration(500);
inFromRight.setInterpolator(new AccelerateInterpolator());
return inFromRight;
}*/
// navigation bar tabs
// private void setTabs() {
// addTab("", R.drawable.tab_icon_gallery_selector, newGallery.class);
// addTab("", R.drawable.tab_icon_map_selector, OpenStreetMap.class);
// addTab("", R.drawable.tab_icon_map_selector, GalleryMap.class);
// addTab("", R.drawable.tab_icon_camera_selector, Blank.class);
// addTab("", R.drawable.tab_icon_upload_selector, Upload.class);
// addTab("", R.drawable.tab_icon_settings_selector, Settings.class);
// }
//
// private void addTab(String labelId, int drawableId, Class<?> c) {
// TabHost.TabSpec spec = mTabHost.newTabSpec("tab" + labelId);
// View tabIndicator = LayoutInflater.from(this).inflate(R.layout.tab_indicator, getTabWidget(), false);
// TextView title = (TextView) tabIndicator.findViewById(R.id.title);
// title.setText(labelId);
// ImageView icon = (ImageView) tabIndicator.findViewById(R.id.icon);
// icon.setImageResource(drawableId);
// spec.setIndicator(tabIndicator);
// Intent intent = new Intent(this, c);
// spec.setContent(intent);
// mTabHost.addTab(spec);
//
//
// }
enum HomeScreenState {
// todo: add as needed
GALLERY,
UPLOAD,
SETTINGS,
CAMERA,
MAP
}
public Animation outToLeftAnimation() {
Animation outtoLeft = new TranslateAnimation(
Animation.RELATIVE_TO_PARENT, 0.0f,
Animation.RELATIVE_TO_PARENT, -1.0f,
Animation.RELATIVE_TO_PARENT, 0.0f,
Animation.RELATIVE_TO_PARENT, 0.0f);
outtoLeft.setDuration(500);
outtoLeft.setInterpolator(new AccelerateInterpolator());
return outtoLeft;
}
// Show Tabs method
public static void showTabs() {
mBottomNav.setVisibility(ViewGroup.VISIBLE);
}
// Hide Tabs method
public static void hideTabs() {
// mBottomNav.setVisibility(ViewGroup.GONE);
}
@Override
protected void onPause() {
Log.d("thong", "Run PhimpMe.onPause()");
super.onPause();
showTabs();
SharedPreferences settings = getSharedPreferences(PREFS_NAME, 0);
SharedPreferences.Editor editor = settings.edit();
editor.putInt("gallery_max_display_photos", MAX_DISPLAY_PHOTOS);
editor.putInt("max_filesize_download", MAX_FILESIZE_DOWNLOAD);
editor.putBoolean(FEEDS_LIST_YAHOO_NEWS_TAG, FEEDS_LIST_YAHOO_NEWS);
editor.putBoolean(FEEDS_LIST_FLICKR_PUBLIC_TAG, FEEDS_LIST_FLICKR_PUBLIC);
editor.putBoolean(FEEDS_LIST_FLICKR_RECENT_TAG, FEEDS_LIST_FLICKR_RECENT);
editor.putBoolean(FEEDS_LIST_GOOGLE_PICASA_PUBLIC_TAG, FEEDS_LIST_GOOGLE_PICASA_PUBLIC);
editor.putBoolean(FEEDS_LIST_GOOGLE_NEWS_TAG, FEEDS_LIST_GOOGLE_NEWS);
editor.putBoolean(FEEDS_LIST_VK_TAG, FEEDS_LIST_VK);
editor.putBoolean(FEEDS_LIST_FACEBOOK_PRIVATE_TAG, FEEDS_LIST_FACEBOOK_PRIVATE);
editor.putBoolean(FEEDS_LIST_FLICKR_PRIVATE_TAG, FEEDS_LIST_FLICKR_PRIVATE);
editor.putBoolean(FEEDS_LIST_GOOGLE_PICASA_PRIVATE_TAG, FEEDS_LIST_GOOGLE_PICASA_PRIVATE);
editor.putBoolean(FEEDS_LIST_DEVIANTART_PRIVITE_TAG, FEEDS_LIST_DEVIANTART_PRIVITE);
editor.putBoolean(FEEDS_LIST_TUMBLR_PRIVATE_TAG, FEEDS_LIST_TUMBLR_PRIVATE);
editor.putBoolean(FEEDS_LIST_TWITTER_PRIVATE_TAG, FEEDS_LIST_TWITTER_PRIVATE);
editor.putBoolean(FEEDS_LIST_DEVIANTART_PUBLIC_TAG, FEEDS_LIST_DEVIANTART_PUBLIC);
editor.putBoolean(FEEDS_LIST_IMAGESHACK_PRIVITE_TAG, FEEDS_LIST_IMAGESHACK_PRIVITE);
editor.putBoolean(FEEDS_LIST_KAIXIN_PRIVATE_TAG, FEEDS_LIST_KAIXIN_PRIVATE);
editor.putBoolean(FEEDS_LIST_IMGUR_PERSONAL_TAG, FEEDS_LIST_IMGUR_PERSONAL);
editor.putBoolean(FEDDS_LIST_MYSERVICES_TAG, FEEDS_LIST_MYSERVICES);
editor.putBoolean(FEEDS_LIST_IMGUR_PUBLIC_TAG, FEEDS_LIST_IMGUR_PUBLIC);
editor.putBoolean(FEEDS_LIST_500PX_PRIVATE_TAG, FEEDS_LIST_500PX_PRIVATE);
editor.putBoolean(FEEDS_LIST_500PX_PUBLIC_TAG, FEEDS_LIST_500PX_PUBLIC);
editor.putBoolean(FEEDS_LIST_SOHU_PERSONAL_TAG, FEEDS_LIST_SOHU_PERSONAL);
editor.putBoolean(FEDDS_LIST_MYSERVICES_TAG1, FEEDS_LIST_MYSERVICES1);
editor.putBoolean(FEDDS_LIST_MYSERVICES_TAG2, FEEDS_LIST_MYSERVICES2);
editor.putBoolean(FEDDS_LIST_MYSERVICES_TAG3, FEEDS_LIST_MYSERVICES3);
editor.putBoolean(FEDDS_LIST_MYSERVICES_TAG4, FEEDS_LIST_MYSERVICES4);
editor.putBoolean(FEDDS_LIST_MYSERVICES_TAG5, FEEDS_LIST_MYSERVICES5);
// Commit the edits!
if (editor.commit()) {
Log.d("thong", "Commit success");
} else {
Log.d("thong", "Commit fail");
}
}
@Override
public void onResume() {
//showTabs();
Log.e("PhimpMe", "Resume");
try {
super.onResume();
} catch (Exception e) {
e.printStackTrace();
}
if (gallery_delete) {
newGallery.update(PhimpMeGallery.num);
}
}
@Override
public boolean onKeyDown(int keycode, KeyEvent event) {
if (keycode == KeyEvent.KEYCODE_BACK) {
AlertDialog.Builder alertbox = new AlertDialog.Builder(ctx);
alertbox.setMessage(getString(R.string.exit_message));
alertbox.setPositiveButton(getString(R.string.yes), new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
finish();
System.exit(0);
}
});
alertbox.setNegativeButton(getString(R.string.cancel), new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
//Resume to current process
}
});
alertbox.create().show();
}
return false;
}
/*public boolean onTouchEvent(MotionEvent me) {
return gestureScanner.onTouchEvent(me);
}*/
public boolean onFling(MotionEvent e1, MotionEvent e2, float velocityX, float velocityY) {
float dispX = e2.getX() - e1.getX();
float dispY = e2.getY() - e1.getY();
if (Math.abs(dispX) >= 200 && Math.abs(dispY) <= 100) {
// swipe ok
if (dispX > 0) {
// L-R swipe
//changeRtoL();
} else {
// R-L swipe
//changeLtoR();
}
}
return true;
}
// fixme: properly implement changeLtoR and changeRtoL
// Currently the above function is the only use of this
/*
private void changeLtoR() {
int curTab = mTabHost.getCurrentTab();
int nextTab = ((curTab + 1) % 4); // !?! (why mod 4)
mTabHost.setCurrentTab(nextTab);
}
private void changeRtoL() {
int curTab = mTabHost.getCurrentTab();
if (curTab != 0) {
int lastTab = ((curTab - 1) % 4);
mTabHost.setCurrentTab(lastTab);
}
}
*/
/* public boolean dispatchTouchEvent(MotionEvent ev) {
if (gestureScanner != null) {
if (gestureScanner.onTouchEvent(ev))
return true;
}
return super.dispatchTouchEvent(ev);
}*/
public void initialize() {
int id;
final String[] columns = {MediaStore.Images.Thumbnails._ID};
final String[] data = {MediaStore.Images.Media.DATA};
final String orderBy = MediaStore.Images.Media._ID;
Cursor pathcursor = this.getContentResolver().query(
MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
data,
null,
null,
orderBy
);
if (pathcursor != null) {
int path_column_index = pathcursor
.getColumnIndexOrThrow(MediaStore.Images.Media.DATA);
int count = pathcursor.getCount();
int c = 0;
for (int i = 0; i < count; i++) {
pathcursor.moveToPosition(i);
String path = pathcursor.getString(path_column_index);
boolean check = cache.check(path);
if (check) {
@SuppressWarnings("unused")
int index = Integer.valueOf(PhimpMe.cache.getCacheId(path));
@SuppressWarnings("unused")
Bitmap bmp = PhimpMe.cache.getCachePath(path);
} else if (c <= 20) {
Cursor cursor = this.getContentResolver().query(
MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
columns,
MediaStore.Images.Media.DATA + " = " + "\"" + path + "\"",
null,
MediaStore.Images.Media._ID
);
if (cursor != null && cursor.getCount() > 0) {
cursor.moveToPosition(0);
id = cursor.getInt(cursor.getColumnIndexOrThrow(MediaStore.Images.Media._ID));
Bitmap bmp = MediaStore.Images.Thumbnails.getThumbnail(
getApplicationContext().getContentResolver(), id,
MediaStore.Images.Thumbnails.MICRO_KIND, null);
PhimpMe.cache.saveCacheFile(path, bmp, id);
} else id = -1;
c++;
}
}
newGallery.update_number++;
}
}
public static void stopThread() {
cachetask.onCancelled();
Log.d("PhimpMe", "Stop Cache Task");
}
public class CacheTask extends AsyncTask<String, Void, String> {
@Override
protected String doInBackground(String... urls) {
try {
Log.d("luong", "Run Cache Task");
initialize();
} catch (RuntimeException runex) {
}
return "";
}
@Override
protected void onPostExecute(String result) {
}
@Override
protected void onCancelled() {
// TODO Auto-generated method stub
super.onCancelled();
}
}
public void onTabChanged(String tabId) {
// TODO Auto-generated method stub
}
public boolean onTouch(View v, MotionEvent event) {
// TODO Auto-generated method stub
return false;
}
public void onGesture(GestureOverlayView overlay, MotionEvent event) {
// TODO Auto-generated method stub
}
public void onGestureCancelled(GestureOverlayView overlay, MotionEvent event) {
// TODO Auto-generated method stub
}
public static void initLibrary(Context context) {
try {
PayPal pp = PayPal.getInstance();
// If the library is already initialized, then we don't need to
// initialize it again.
if ((pp == null) || (!pp.isLibraryInitialized())) {
pp = null;
pp = PayPal.initWithAppID(context, "APP-80W284485P519543T", PayPal.ENV_SANDBOX);
// -- These are required settings.
//pp.setLanguage("de_DE");
pp.setLanguage("en_US");
pp.setFeesPayer(PayPal.FEEPAYER_EACHRECEIVER);
// Set to true if the transaction will require shipping.
pp.setShippingEnabled(true);
pp.setDynamicAmountCalculationEnabled(false);
}
} catch (IllegalStateException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
private class FragmentAdapter extends FragmentStatePagerAdapter{
public FragmentAdapter(FragmentManager fm) {
super(fm);
}
@Override
public Fragment getItem(int position) {
switch (position){
case 0:
return new newGallery();
case 3:
return new MapFragment();
case 2:
return new Camera2();
case 1:
return new Upload();
default:
return new Settings();
}
}
@Override
public int getCount() {
return 5;
}
}
}
| 1 | 10,740 | Remove the trailing white space | fossasia-phimpme-android | java |
@@ -21,7 +21,7 @@ mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
- and mmcv_version <= digit_version(mmcv_maximum_version)), \
+ and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
| 1 | import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
mmcv_minimum_version = '1.1.5'
mmcv_maximum_version = '1.3'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
| 1 | 21,801 | No need to modify this. | open-mmlab-mmdetection | py |
@@ -14,7 +14,7 @@ import (
madns "github.com/multiformats/go-multiaddr-dns"
)
-func Discover(ctx context.Context, addr ma.Multiaddr, f func(ma.Multiaddr) (stop bool, err error)) (stopped bool, err error) {
+func Discover(ctx context.Context, addr ma.Multiaddr, f func(ma.Multiaddr) (bool, error)) (bool, error) {
if comp, _ := ma.SplitFirst(addr); comp.Protocol().Name != "dnsaddr" {
return f(addr)
} | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p2p
import (
"context"
"errors"
"fmt"
"math/rand"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
)
func Discover(ctx context.Context, addr ma.Multiaddr, f func(ma.Multiaddr) (stop bool, err error)) (stopped bool, err error) {
if comp, _ := ma.SplitFirst(addr); comp.Protocol().Name != "dnsaddr" {
return f(addr)
}
dnsResolver := madns.DefaultResolver
addrs, err := dnsResolver.Resolve(ctx, addr)
if err != nil {
return false, fmt.Errorf("dns resolve address %s: %w", addr, err)
}
if len(addrs) == 0 {
return false, errors.New("non-resolvable API endpoint")
}
rand.Shuffle(len(addrs), func(i, j int) {
addrs[i], addrs[j] = addrs[j], addrs[i]
})
for _, addr := range addrs {
stopped, err = Discover(ctx, addr, f)
if err != nil {
return false, fmt.Errorf("discover %s: %w", addr, err)
}
if stopped {
break
}
}
return false, nil
}
| 1 | 12,204 | would be nice at some point to clean this signature up... not relevant for this PR | ethersphere-bee | go |
@@ -342,6 +342,12 @@ class Configurator
),
$fieldConfiguration
);
+
+ // if the 'type' is not set explicitly for a virtual field,
+ // consider it as a string, so the backend displays its contents
+ if (null === $normalizedConfiguration['type']) {
+ $normalizedConfiguration['type'] = 'text';
+ }
} else {
// this is a regular field that exists as a property of the related Doctrine entity
$normalizedConfiguration = array_replace( | 1 | <?php
/*
* This file is part of the EasyAdminBundle.
*
* (c) Javier Eguiluz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace JavierEguiluz\Bundle\EasyAdminBundle\Configuration;
use Doctrine\Common\Persistence\Mapping\ClassMetadata;
use Doctrine\ORM\Mapping\ClassMetadataInfo;
use JavierEguiluz\Bundle\EasyAdminBundle\Reflection\EntityMetadataInspector;
use JavierEguiluz\Bundle\EasyAdminBundle\Reflection\ClassPropertyReflector;
class Configurator
{
private $backendConfig = array();
private $entitiesConfig = array();
private $inspector;
private $reflector;
private $defaultEntityFields = array();
private $defaultEntityFieldConfiguration = array(
'class' => null, // CSS class or classes applied to form field
'format' => null, // date/time/datetime/number format applied to form field value
'help' => null, // form field help message
'label' => null, // form field label (if 'null', autogenerate it)
'type' => null, // its value matches the value of 'dataType' for list/show and the value of 'fieldType' for new/edit
'fieldType' => null, // Symfony form field type (text, date, number, choice, ...) used to display the field
'dataType' => null, // Data type (text, date, integer, boolean, ...) of the Doctrine property associated with the field
'virtual' => false, // is a virtual field or a real Doctrine entity property?
'sortable' => true, // listings can be sorted according to the values of this field
'template' => null, // the path of the template used to render the field in 'show' and 'list' views
'type_options' => array(), // the options passed to the Symfony Form type used to render the form field
);
private $doctrineTypeToFormTypeMap = array(
'array' => 'collection',
'association' => null,
'bigint' => 'text',
'blob' => 'textarea',
'boolean' => 'checkbox',
'date' => 'date',
'datetime' => 'datetime',
'datetimetz' => 'datetime',
'decimal' => 'number',
'float' => 'number',
'guid' => 'text',
'integer' => 'integer',
'json_array' => 'textarea',
'object' => 'textarea',
'simple_array' => 'collection',
'smallint' => 'integer',
'string' => 'text',
'text' => 'textarea',
'time' => 'time',
);
public function __construct(array $backendConfig, EntityMetadataInspector $inspector, ClassPropertyReflector $reflector)
{
$this->backendConfig = $backendConfig;
$this->inspector = $inspector;
$this->reflector = $reflector;
}
/**
* Processes and returns the full configuration for the given entity name.
* This configuration includes all the information about the form fields
* and properties of the entity.
*
* @param string $entityName
*
* @return array The full entity configuration
*/
public function getEntityConfiguration($entityName)
{
// if the configuration has already been processed for the given entity, reuse it
if (isset($this->entitiesConfig[$entityName])) {
return $this->entitiesConfig[$entityName];
}
if (!isset($this->backendConfig['entities'][$entityName])) {
throw new \InvalidArgumentException(sprintf('Entity "%s" is not managed by EasyAdmin.', $entityName));
}
$entityConfiguration = $this->backendConfig['entities'][$entityName];
$entityMetadata = $this->inspector->getEntityMetadata($entityConfiguration['class']);
$entityConfiguration['primary_key_field_name'] = $entityMetadata->getSingleIdentifierFieldName();
$entityProperties = $this->processEntityPropertiesMetadata($entityMetadata);
$entityConfiguration['properties'] = $entityProperties;
// default fields used when the view (list, edit, etc.) doesn't define its own fields
$this->defaultEntityFields = $this->createFieldsFromEntityProperties($entityProperties);
$entityConfiguration['list']['fields'] = $this->getFieldsForListView($entityConfiguration);
$entityConfiguration['show']['fields'] = $this->getFieldsForShowView($entityConfiguration);
$entityConfiguration['edit']['fields'] = $this->getFieldsForFormBasedViews('edit', $entityConfiguration);
$entityConfiguration['new']['fields'] = $this->getFieldsForFormBasedViews('new', $entityConfiguration);
$entityConfiguration['search']['fields'] = $this->getFieldsForSearchAction($entityConfiguration);
$entityConfiguration = $this->introspectGettersAndSetters($entityConfiguration);
$entityConfiguration = $this->processFieldTemplates($entityConfiguration);
$this->entitiesConfig[$entityName] = $entityConfiguration;
return $entityConfiguration;
}
/**
* Takes the entity metadata introspected via Doctrine and completes its
* contents to simplify data processing for the rest of the application.
*
* @param ClassMetadata $entityMetadata The entity metadata introspected via Doctrine
*
* @return array The entity properties metadata provided by Doctrine
*/
private function processEntityPropertiesMetadata(ClassMetadata $entityMetadata)
{
$entityPropertiesMetadata = array();
if ($entityMetadata->isIdentifierComposite) {
throw new \RuntimeException(sprintf("The '%s' entity isn't valid because it contains a composite primary key.", $entityMetadata->name));
}
// introspect regular entity fields
foreach ($entityMetadata->fieldMappings as $fieldName => $fieldMetadata) {
// field names are tweaked this way to simplify Twig templates and extensions
$fieldName = str_replace('_', '', $fieldName);
$entityPropertiesMetadata[$fieldName] = $fieldMetadata;
}
// introspect fields for entity associations (except many-to-many)
foreach ($entityMetadata->associationMappings as $fieldName => $associationMetadata) {
if (ClassMetadataInfo::MANY_TO_MANY !== $associationMetadata['type']) {
$entityPropertiesMetadata[$fieldName] = array(
'type' => 'association',
'associationType' => $associationMetadata['type'],
'fieldName' => $fieldName,
'fetch' => $associationMetadata['fetch'],
'isOwningSide' => $associationMetadata['isOwningSide'],
'targetEntity' => $associationMetadata['targetEntity'],
);
}
}
return $entityPropertiesMetadata;
}
/**
* Returns the list of fields to show in the 'list' view of this entity.
*
* @param array $entityConfiguration
*
* @return array The list of fields to show and their metadata
*/
private function getFieldsForListView(array $entityConfiguration)
{
if (0 === count($entityConfiguration['list']['fields'])) {
$entityConfiguration['list']['fields'] = $this->filterListFieldsBasedOnSmartGuesses($this->defaultEntityFields);
}
return $this->normalizeFieldsConfiguration('list', $entityConfiguration);
}
/**
* Returns the list of fields to show in the 'show' view of this entity.
*
* @param array $entityConfiguration
*
* @return array The list of fields to show and their metadata
*/
private function getFieldsForShowView(array $entityConfiguration)
{
if (0 === count($entityConfiguration['show']['fields'])) {
$entityConfiguration['show']['fields'] = $this->defaultEntityFields;
}
return $this->normalizeFieldsConfiguration('show', $entityConfiguration);
}
/**
* Returns the list of fields to show in the forms of the given view
* ('edit' or 'new').
*
* @param string $view
* @param array $entityConfiguration
*
* @return array The list of fields to show and their metadata
*/
protected function getFieldsForFormBasedViews($view, array $entityConfiguration)
{
if (0 === count($entityConfiguration[$view]['fields'])) {
$excludedFieldNames = array($entityConfiguration['primary_key_field_name']);
$excludedFieldTypes = array('binary', 'blob', 'json_array', 'object');
$entityConfiguration[$view]['fields'] = $this->filterFieldsByNameAndType($this->defaultEntityFields, $excludedFieldNames, $excludedFieldTypes);
}
return $this->normalizeFieldsConfiguration($view, $entityConfiguration);
}
/**
* Returns the list of entity fields on which the search query is performed.
*
* @return array The list of fields to use for the search
*/
private function getFieldsForSearchAction(array $entityConfiguration)
{
if (0 === count($entityConfiguration['search']['fields'])) {
$excludedFieldNames = array();
$excludedFieldTypes = array('association', 'binary', 'boolean', 'blob', 'date', 'datetime', 'datetimetz', 'time', 'object');
$entityConfiguration['search']['fields'] = $this->filterFieldsByNameAndType($this->defaultEntityFields, $excludedFieldNames, $excludedFieldTypes);
}
return $this->normalizeFieldsConfiguration('search', $entityConfiguration);
}
/**
* If the backend configuration doesn't define any options for the fields of some entity,
* create some basic field configuration based on the entity's Doctrine metadata.
*
* @param array $entityProperties
*
* @return array The array of fields
*/
private function createFieldsFromEntityProperties($entityProperties)
{
$fields = array();
foreach ($entityProperties as $propertyName => $propertyMetadata) {
$metadata = array_replace($this->defaultEntityFieldConfiguration, $propertyMetadata);
$metadata['property'] = $propertyName;
$metadata['dataType'] = $propertyMetadata['type'];
$metadata['fieldType'] = $this->getFormTypeFromDoctrineType($propertyMetadata['type']);
$metadata['format'] = $this->getFieldFormat($propertyMetadata['type']);
$fields[$propertyName] = $metadata;
}
return $fields;
}
/**
* Guesses the best fields to display in a listing when the entity doesn't
* define any configuration. It does so limiting the number of fields to
* display and discarding several field types.
*
* @param array $entityFields
*
* @return array The list of fields to display
*/
private function filterListFieldsBasedOnSmartGuesses(array $entityFields)
{
// empirical guess: listings with more than 7 fields look ugly
$maxListFields = 7;
$excludedFieldNames = array('password', 'salt', 'slug', 'updatedAt', 'uuid');
$excludedFieldTypes = array('array', 'binary', 'blob', 'guid', 'json_array', 'object', 'simple_array', 'text');
// if the entity has few fields, show them all
if (count($entityFields) <= $maxListFields) {
return $entityFields;
}
// if the entity has a lot of fields, try to guess which fields we can remove
$filteredFields = $entityFields;
foreach ($entityFields as $name => $metadata) {
if (in_array($name, $excludedFieldNames) || in_array($metadata['type'], $excludedFieldTypes)) {
unset($filteredFields[$name]);
// whenever a field is removed, check again if we are below the acceptable number of fields
if (count($filteredFields) <= $maxListFields) {
return $filteredFields;
}
}
}
// if the entity has still a lot of remaining fields, just slice the last ones
return array_slice($filteredFields, 0, $maxListFields);
}
/**
* Filters a list of fields excluding the given list of field names and field types.
*
* @param array $fields
* @param string[] $excludedFieldNames
* @param string[] $excludedFieldTypes
*
* @return array The filtered list of fields
*/
private function filterFieldsByNameAndType(array $fields, array $excludedFieldNames, array $excludedFieldTypes)
{
$filteredFields = array();
foreach ($fields as $name => $metadata) {
if (!in_array($name, $excludedFieldNames) && !in_array($metadata['type'], $excludedFieldTypes)) {
$filteredFields[$name] = $fields[$name];
}
}
return $filteredFields;
}
/**
* Merges all the information about the fields associated with the given view
* to return the complete set of normalized field configuration.
*
* @param string $view
* @param array $entityConfiguration
*
* @return array The complete field configuration
*/
private function normalizeFieldsConfiguration($view, $entityConfiguration)
{
$configuration = array();
$fieldsConfiguration = $entityConfiguration[$view]['fields'];
$originalViewConfiguration = $this->backendConfig['entities'][$entityConfiguration['name']][$view];
foreach ($fieldsConfiguration as $fieldName => $fieldConfiguration) {
$originalFieldConfiguration = isset($originalViewConfiguration['fields'][$fieldName]) ? $originalViewConfiguration['fields'][$fieldName] : null;
if (!array_key_exists($fieldName, $entityConfiguration['properties'])) {
// this field doesn't exist as a property of the related Doctrine
// entity. Treat it as a 'virtual' field and provide default values
// for some field options (such as fieldName and columnName) to avoid
// any problem while processing field data
$normalizedConfiguration = array_replace(
$this->defaultEntityFieldConfiguration,
array(
'columnName' => null,
'fieldName' => $fieldName,
'id' => false,
'label' => $fieldName,
'sortable' => false,
'virtual' => true,
),
$fieldConfiguration
);
} else {
// this is a regular field that exists as a property of the related Doctrine entity
$normalizedConfiguration = array_replace(
$this->defaultEntityFieldConfiguration,
$entityConfiguration['properties'][$fieldName],
$fieldConfiguration
);
}
// virtual fields and associations different from *-to-one cannot be sorted in listings
$isToManyAssociation = 'association' === $normalizedConfiguration['type']
&& in_array($normalizedConfiguration['associationType'], array(ClassMetadataInfo::ONE_TO_MANY, ClassMetadataInfo::MANY_TO_MANY));
if (true === $normalizedConfiguration['virtual'] || $isToManyAssociation) {
$normalizedConfiguration['sortable'] = false;
}
// special case: if the field is called 'id' and doesn't define a custom
// label, use 'ID' as label. This improves the readability of the label
// of this important field, which is usually related to the primary key
if ('id' === $normalizedConfiguration['fieldName'] && !isset($normalizedConfiguration['label'])) {
$normalizedConfiguration['label'] = 'ID';
}
// 'list', 'search' and 'show' views: use the value of the 'type' option
// as the 'dataType' option because the previous code has already
// prioritized end-user preferences over Doctrine and default values
if (in_array($view, array('list', 'search', 'show'))) {
$normalizedConfiguration['dataType'] = $normalizedConfiguration['type'];
}
// 'new' and 'edit' views: if the user has defined the 'type' option
// for the field, use it as 'fieldType'. Otherwise, infer the best field
// type using the property data type.
if (in_array($view, array('edit', 'new'))) {
$normalizedConfiguration['fieldType'] = isset($originalFieldConfiguration['type'])
? $originalFieldConfiguration['type']
: $this->getFormTypeFromDoctrineType($normalizedConfiguration['type']);
}
// special case for the 'list' view: 'boolean' properties are displayed
// as toggleable flip switches when certain conditions are met
if ('list' === $view && 'boolean' === $normalizedConfiguration['dataType']) {
// conditions:
// 1) the end-user hasn't configured the field type explicitly
// 2) the 'edit' action is enabled for the 'list' view of this entity
$isEditActionEnabled = array_key_exists('edit', $entityConfiguration['list']['actions']);
if (!isset($originalFieldConfiguration['type']) && $isEditActionEnabled) {
$normalizedConfiguration['dataType'] = 'toggle';
}
}
if (null === $normalizedConfiguration['format']) {
$normalizedConfiguration['format'] = $this->getFieldFormat($normalizedConfiguration['type']);
}
$configuration[$fieldName] = $normalizedConfiguration;
}
return $configuration;
}
/**
* Returns the date/time/datetime/number format for the given field
* according to its type and the default formats defined for the backend.
*
* @param string $fieldType
*
* @return string The format that should be applied to the field value
*/
private function getFieldFormat($fieldType)
{
if (in_array($fieldType, array('date', 'time', 'datetime', 'datetimetz'))) {
// make 'datetimetz' use the same format as 'datetime'
$fieldType = ('datetimetz' === $fieldType) ? 'datetime' : $fieldType;
return $this->backendConfig['formats'][$fieldType];
}
if (in_array($fieldType, array('bigint', 'integer', 'smallint', 'decimal', 'float'))) {
return isset($this->backendConfig['formats']['number']) ? $this->backendConfig['formats']['number'] : null;
}
}
/**
* Introspects the getters and setters for the fields used by all views.
* This preprocessing saves a lot of further processing when accessing or
* setting the value of the entity properties.
*
* @param array $entityConfiguration
*
* @return array
*/
private function introspectGettersAndSetters($entityConfiguration)
{
foreach (array('new', 'edit', 'list', 'show', 'search') as $view) {
$fieldsConfiguration = $entityConfiguration[$view]['fields'];
foreach ($fieldsConfiguration as $fieldName => $fieldConfiguration) {
$getter = $this->reflector->getGetter($entityConfiguration['class'], $fieldName);
$fieldConfiguration['getter'] = $getter;
$setter = $this->reflector->getSetter($entityConfiguration['class'], $fieldName);
$fieldConfiguration['setter'] = $setter;
$isPublic = $this->reflector->isPublic($entityConfiguration['class'], $fieldName);
$fieldConfiguration['isPublic'] = $isPublic;
$fieldConfiguration['canBeGet'] = $getter || $isPublic;
$fieldConfiguration['canBeSet'] = $setter || $isPublic;
$entityConfiguration[$view]['fields'][$fieldName] = $fieldConfiguration;
}
}
return $entityConfiguration;
}
/**
* Determines the template used to render each backend element. This is not
* trivial because templates can depend on the entity displayed and they
* define an advanced override mechanism.
*
* @param array $entityConfiguration
*
* @return array
*/
private function processFieldTemplates(array $entityConfiguration)
{
foreach (array('list', 'show') as $view) {
foreach ($entityConfiguration[$view]['fields'] as $fieldName => $fieldMetadata) {
if (null !== $fieldMetadata['template']) {
continue;
}
// this prevents the template from displaying the 'id' primary key formatted as a number
if ('id' === $fieldName) {
$template = $entityConfiguration['templates']['field_id'];
} elseif (array_key_exists('field_'.$fieldMetadata['type'], $entityConfiguration['templates'])) {
$template = $entityConfiguration['templates']['field_'.$fieldMetadata['type']];
} else {
$template = $entityConfiguration['templates']['label_undefined'];
}
$entityConfiguration[$view]['fields'][$fieldName]['template'] = $template;
}
}
return $entityConfiguration;
}
/**
* Returns the most appropriate Symfony Form type for the given Doctrine type.
*
* @param string $doctrineType
*
* @return string
*/
private function getFormTypeFromDoctrineType($doctrineType)
{
// don't replace this array_key_exists() with isset(), because the Doctrine
// type map can contain 'null' values that must be treated as such
return array_key_exists($doctrineType, $this->doctrineTypeToFormTypeMap)
? $this->doctrineTypeToFormTypeMap[$doctrineType]
: $doctrineType;
}
/**
* Exposes the backend configuration to any external method that needs it.
*
* @return array
*/
public function getBackendConfig()
{
return $this->backendConfig;
}
}
| 1 | 9,115 | Here, is it `null` because it is `null` in `$fieldConfiguration`, or because it has been set to `null` above in the `array_replace`? If it doesn't exist at all in `$fieldConfiguration`, I think you should add `type => 'text'` above in the `array_replace`. | EasyCorp-EasyAdminBundle | php |
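A minimal sketch of what the reviewer seems to be suggesting (hypothetical: the defaults array and variable names below are assumptions, not taken from the bundle). Seeding a default 'type' in the defaults passed to array_replace() means a later `null === ...['type']` check can only mean "explicitly set to null", never "key missing":

$fieldConfiguration = array_replace(array(
    'type'     => 'text',   // assumed default, as proposed in the review
    'format'   => null,
    'template' => null,
), $originalFieldConfiguration);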
@@ -620,7 +620,7 @@ public class JavaParserFacade {
* references an outer class -- as its ancestor, return the declaration corresponding to the class name specified.
*/
protected Node findContainingTypeDeclOrObjectCreationExpr(Node node, String className) {
- if (node instanceof ClassOrInterfaceDeclaration && ((ClassOrInterfaceDeclaration) node).getFullyQualifiedName().get().equals(className)) {
+ if (node instanceof ClassOrInterfaceDeclaration && ((ClassOrInterfaceDeclaration) node).getFullyQualifiedName().get().endsWith(className)) {
return node;
}
if (node instanceof EnumDeclaration) { | 1 | /*
* Copyright (C) 2015-2016 Federico Tomassetti
* Copyright (C) 2017-2020 The JavaParser Team.
*
* This file is part of JavaParser.
*
* JavaParser can be used either under the terms of
* a) the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* b) the terms of the Apache License
*
* You should have received a copy of both licenses in LICENCE.LGPL and
* LICENCE.APACHE. Please refer to those files for details.
*
* JavaParser is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*/
package com.github.javaparser.symbolsolver.javaparsermodel;
import static com.github.javaparser.symbolsolver.javaparser.Navigator.demandParentNode;
import static com.github.javaparser.symbolsolver.model.resolution.SymbolReference.solved;
import static com.github.javaparser.symbolsolver.model.resolution.SymbolReference.unsolved;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.WeakHashMap;
import java.util.stream.Collectors;
import com.github.javaparser.ast.CompilationUnit;
import com.github.javaparser.ast.DataKey;
import com.github.javaparser.ast.Node;
import com.github.javaparser.ast.NodeList;
import com.github.javaparser.ast.body.ClassOrInterfaceDeclaration;
import com.github.javaparser.ast.body.EnumDeclaration;
import com.github.javaparser.ast.body.TypeDeclaration;
import com.github.javaparser.ast.body.VariableDeclarator;
import com.github.javaparser.ast.expr.AnnotationExpr;
import com.github.javaparser.ast.expr.BinaryExpr;
import com.github.javaparser.ast.expr.Expression;
import com.github.javaparser.ast.expr.FieldAccessExpr;
import com.github.javaparser.ast.expr.LambdaExpr;
import com.github.javaparser.ast.expr.MethodCallExpr;
import com.github.javaparser.ast.expr.MethodReferenceExpr;
import com.github.javaparser.ast.expr.NameExpr;
import com.github.javaparser.ast.expr.ObjectCreationExpr;
import com.github.javaparser.ast.expr.SimpleName;
import com.github.javaparser.ast.expr.ThisExpr;
import com.github.javaparser.ast.expr.TypeExpr;
import com.github.javaparser.ast.stmt.ExplicitConstructorInvocationStmt;
import com.github.javaparser.ast.type.ArrayType;
import com.github.javaparser.ast.type.ClassOrInterfaceType;
import com.github.javaparser.ast.type.PrimitiveType;
import com.github.javaparser.ast.type.Type;
import com.github.javaparser.ast.type.UnionType;
import com.github.javaparser.ast.type.VarType;
import com.github.javaparser.ast.type.VoidType;
import com.github.javaparser.ast.type.WildcardType;
import com.github.javaparser.resolution.MethodAmbiguityException;
import com.github.javaparser.resolution.MethodUsage;
import com.github.javaparser.resolution.UnsolvedSymbolException;
import com.github.javaparser.resolution.declarations.ResolvedAnnotationDeclaration;
import com.github.javaparser.resolution.declarations.ResolvedClassDeclaration;
import com.github.javaparser.resolution.declarations.ResolvedConstructorDeclaration;
import com.github.javaparser.resolution.declarations.ResolvedMethodDeclaration;
import com.github.javaparser.resolution.declarations.ResolvedReferenceTypeDeclaration;
import com.github.javaparser.resolution.declarations.ResolvedTypeDeclaration;
import com.github.javaparser.resolution.declarations.ResolvedTypeParameterDeclaration;
import com.github.javaparser.resolution.declarations.ResolvedValueDeclaration;
import com.github.javaparser.resolution.types.ResolvedArrayType;
import com.github.javaparser.resolution.types.ResolvedPrimitiveType;
import com.github.javaparser.resolution.types.ResolvedReferenceType;
import com.github.javaparser.resolution.types.ResolvedType;
import com.github.javaparser.resolution.types.ResolvedTypeVariable;
import com.github.javaparser.resolution.types.ResolvedUnionType;
import com.github.javaparser.resolution.types.ResolvedVoidType;
import com.github.javaparser.resolution.types.ResolvedWildcard;
import com.github.javaparser.symbolsolver.core.resolution.Context;
import com.github.javaparser.symbolsolver.javaparsermodel.contexts.FieldAccessContext;
import com.github.javaparser.symbolsolver.javaparsermodel.declarations.JavaParserAnonymousClassDeclaration;
import com.github.javaparser.symbolsolver.javaparsermodel.declarations.JavaParserEnumDeclaration;
import com.github.javaparser.symbolsolver.javaparsermodel.declarations.JavaParserTypeVariableDeclaration;
import com.github.javaparser.symbolsolver.model.resolution.SymbolReference;
import com.github.javaparser.symbolsolver.model.resolution.TypeSolver;
import com.github.javaparser.symbolsolver.model.typesystem.ReferenceTypeImpl;
import com.github.javaparser.symbolsolver.reflectionmodel.ReflectionClassDeclaration;
import com.github.javaparser.symbolsolver.resolution.ConstructorResolutionLogic;
import com.github.javaparser.symbolsolver.resolution.MethodResolutionLogic;
import com.github.javaparser.symbolsolver.resolution.SymbolSolver;
import com.github.javaparser.utils.Log;
/**
* Class to be used by final users to solve symbols for JavaParser ASTs.
*
* @author Federico Tomassetti
*/
public class JavaParserFacade {
private static final DataKey<ResolvedType> TYPE_WITH_LAMBDAS_RESOLVED = new DataKey<ResolvedType>() {
};
private static final DataKey<ResolvedType> TYPE_WITHOUT_LAMBDAS_RESOLVED = new DataKey<ResolvedType>() {
};
private static final Map<TypeSolver, JavaParserFacade> instances = new WeakHashMap<>();
private final TypeSolver typeSolver;
private final TypeExtractor typeExtractor;
private final SymbolSolver symbolSolver;
private JavaParserFacade(TypeSolver typeSolver) {
this.typeSolver = typeSolver.getRoot();
this.symbolSolver = new SymbolSolver(typeSolver);
this.typeExtractor = new TypeExtractor(typeSolver, this);
}
public TypeSolver getTypeSolver() {
return typeSolver;
}
public SymbolSolver getSymbolSolver() {
return symbolSolver;
}
/**
* Note that the addition of the modifier {@code synchronized} is specific and directly in response to issue #2668.
* <br>This <strong>MUST NOT</strong> be misinterpreted as a signal that JavaParser is safe to use within a multi-threaded environment.
* <br>
* <br>Additional discussion and context from a user attempting multithreading can be found within issue #2671 .
* <br>
*
* @see <a href="https://github.com/javaparser/javaparser/issues/2668">https://github.com/javaparser/javaparser/issues/2668</a>
* @see <a href="https://github.com/javaparser/javaparser/issues/2671">https://github.com/javaparser/javaparser/issues/2671</a>
*/
public synchronized static JavaParserFacade get(TypeSolver typeSolver) {
return instances.computeIfAbsent(typeSolver, JavaParserFacade::new);
}
/**
* This method is used to clear internal caches for the sake of releasing memory.
*/
public static void clearInstances() {
instances.clear();
}
protected static ResolvedType solveGenericTypes(ResolvedType type, Context context) {
if (type.isTypeVariable()) {
return context.solveGenericType(type.describe()).orElse(type);
}
if (type.isWildcard()) {
if (type.asWildcard().isExtends() || type.asWildcard().isSuper()) {
ResolvedWildcard wildcardUsage = type.asWildcard();
ResolvedType boundResolved = solveGenericTypes(wildcardUsage.getBoundedType(), context);
if (wildcardUsage.isExtends()) {
return ResolvedWildcard.extendsBound(boundResolved);
} else {
return ResolvedWildcard.superBound(boundResolved);
}
}
}
return type;
}
public SymbolReference<? extends ResolvedValueDeclaration> solve(NameExpr nameExpr) {
return symbolSolver.solveSymbol(nameExpr.getName().getId(), nameExpr);
}
public SymbolReference<? extends ResolvedValueDeclaration> solve(SimpleName nameExpr) {
return symbolSolver.solveSymbol(nameExpr.getId(), nameExpr);
}
public SymbolReference<? extends ResolvedValueDeclaration> solve(Expression expr) {
return expr.toNameExpr().map(this::solve).orElseThrow(() -> new IllegalArgumentException(expr.getClass().getCanonicalName()));
}
public SymbolReference<ResolvedMethodDeclaration> solve(MethodCallExpr methodCallExpr) {
return solve(methodCallExpr, true);
}
public SymbolReference<ResolvedMethodDeclaration> solve(MethodReferenceExpr methodReferenceExpr) {
return solve(methodReferenceExpr, true);
}
public SymbolReference<ResolvedConstructorDeclaration> solve(ObjectCreationExpr objectCreationExpr) {
return solve(objectCreationExpr, true);
}
public SymbolReference<ResolvedConstructorDeclaration> solve(ExplicitConstructorInvocationStmt explicitConstructorInvocationStmt) {
return solve(explicitConstructorInvocationStmt, true);
}
public SymbolReference<ResolvedConstructorDeclaration> solve(ExplicitConstructorInvocationStmt explicitConstructorInvocationStmt, boolean solveLambdas) {
// Constructor invocation must exist within a class (not interface).
Optional<ClassOrInterfaceDeclaration> optAncestorClassOrInterfaceNode = explicitConstructorInvocationStmt.findAncestor(ClassOrInterfaceDeclaration.class);
if (!optAncestorClassOrInterfaceNode.isPresent()) {
return unsolved(ResolvedConstructorDeclaration.class);
}
ClassOrInterfaceDeclaration classOrInterfaceNode = optAncestorClassOrInterfaceNode.get();
ResolvedReferenceTypeDeclaration resolvedClassNode = classOrInterfaceNode.resolve();
if (!resolvedClassNode.isClass()) {
throw new IllegalStateException("Expected to be a class -- cannot call this() or super() within an interface.");
}
ResolvedTypeDeclaration typeDecl = null;
if (explicitConstructorInvocationStmt.isThis()) {
// this()
typeDecl = resolvedClassNode.asReferenceType();
} else {
// super()
Optional<ResolvedReferenceType> superClass = resolvedClassNode.asClass().getSuperClass();
if (superClass.isPresent() && superClass.get().getTypeDeclaration().isPresent()) {
typeDecl = superClass.get().getTypeDeclaration().get();
}
}
if (typeDecl == null) {
return unsolved(ResolvedConstructorDeclaration.class);
}
// Solve each of the arguments being passed into this constructor invocation.
List<ResolvedType> argumentTypes = new LinkedList<>();
List<LambdaArgumentTypePlaceholder> placeholders = new LinkedList<>();
solveArguments(explicitConstructorInvocationStmt, explicitConstructorInvocationStmt.getArguments(), solveLambdas, argumentTypes, placeholders);
// Determine which constructor is referred to, and return it.
SymbolReference<ResolvedConstructorDeclaration> res = ConstructorResolutionLogic.findMostApplicable(((ResolvedClassDeclaration) typeDecl).getConstructors(), argumentTypes, typeSolver);
for (LambdaArgumentTypePlaceholder placeholder : placeholders) {
placeholder.setMethod(res);
}
return res;
}
public SymbolReference<ResolvedTypeDeclaration> solve(ThisExpr node) {
// If 'this' is prefixed by a class eg. MyClass.this
if (node.getTypeName().isPresent()) {
// Get the class name
String className = node.getTypeName().get().asString();
// Attempt to resolve using a typeSolver
SymbolReference<ResolvedReferenceTypeDeclaration> clazz = typeSolver.tryToSolveType(className);
if (clazz.isSolved()) {
return solved(clazz.getCorrespondingDeclaration());
}
// Attempt to resolve locally in Compilation unit
Optional<CompilationUnit> cu = node.findAncestor(CompilationUnit.class);
if (cu.isPresent()) {
Optional<ClassOrInterfaceDeclaration> classByName = cu.get().getClassByName(className);
if (classByName.isPresent()) {
return solved(getTypeDeclaration(classByName.get()));
}
}
}
return solved(getTypeDeclaration(findContainingTypeDeclOrObjectCreationExpr(node)));
}
/**
* Given a constructor call find out to which constructor declaration it corresponds.
*/
public SymbolReference<ResolvedConstructorDeclaration> solve(ObjectCreationExpr objectCreationExpr, boolean solveLambdas) {
List<ResolvedType> argumentTypes = new LinkedList<>();
List<LambdaArgumentTypePlaceholder> placeholders = new LinkedList<>();
solveArguments(objectCreationExpr, objectCreationExpr.getArguments(), solveLambdas, argumentTypes, placeholders);
ResolvedReferenceTypeDeclaration typeDecl = null;
if (objectCreationExpr.getAnonymousClassBody().isPresent()) {
typeDecl = new JavaParserAnonymousClassDeclaration(objectCreationExpr, typeSolver);
} else {
ResolvedType classDecl = JavaParserFacade.get(typeSolver).convert(objectCreationExpr.getType(), objectCreationExpr);
if (classDecl.isReferenceType() && classDecl.asReferenceType().getTypeDeclaration().isPresent()) {
typeDecl = classDecl.asReferenceType().getTypeDeclaration().get();
}
}
if (typeDecl == null) {
return unsolved(ResolvedConstructorDeclaration.class);
}
SymbolReference<ResolvedConstructorDeclaration> res = ConstructorResolutionLogic.findMostApplicable(typeDecl.getConstructors(), argumentTypes, typeSolver);
for (LambdaArgumentTypePlaceholder placeholder : placeholders) {
placeholder.setMethod(res);
}
return res;
}
private void solveArguments(Node node, NodeList<Expression> args, boolean solveLambdas, List<ResolvedType> argumentTypes,
List<LambdaArgumentTypePlaceholder> placeholders) {
int i = 0;
for (Expression parameterValue : args) {
if (parameterValue instanceof LambdaExpr || parameterValue instanceof MethodReferenceExpr) {
LambdaArgumentTypePlaceholder placeholder = new LambdaArgumentTypePlaceholder(i);
argumentTypes.add(placeholder);
placeholders.add(placeholder);
} else {
try {
argumentTypes.add(JavaParserFacade.get(typeSolver).getType(parameterValue, solveLambdas));
} catch (UnsolvedSymbolException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException(String.format("Unable to calculate the type of a parameter of a method call. Method call: %s, Parameter: %s",
node, parameterValue), e);
}
}
i++;
}
}
/**
* Given a method call find out to which method declaration it corresponds.
*/
public SymbolReference<ResolvedMethodDeclaration> solve(MethodCallExpr methodCallExpr, boolean solveLambdas) {
List<ResolvedType> argumentTypes = new LinkedList<>();
List<LambdaArgumentTypePlaceholder> placeholders = new LinkedList<>();
solveArguments(methodCallExpr, methodCallExpr.getArguments(), solveLambdas, argumentTypes, placeholders);
SymbolReference<ResolvedMethodDeclaration> res = JavaParserFactory.getContext(methodCallExpr, typeSolver).solveMethod(methodCallExpr.getName().getId(), argumentTypes, false);
for (LambdaArgumentTypePlaceholder placeholder : placeholders) {
placeholder.setMethod(res);
}
return res;
}
/**
* Given a method reference find out to which method declaration it corresponds.
*/
public SymbolReference<ResolvedMethodDeclaration> solve(MethodReferenceExpr methodReferenceExpr, boolean solveLambdas) {
// pass empty argument list to be populated
List<ResolvedType> argumentTypes = new LinkedList<>();
return JavaParserFactory.getContext(methodReferenceExpr, typeSolver).solveMethod(methodReferenceExpr.getIdentifier(), argumentTypes, false);
}
public SymbolReference<ResolvedAnnotationDeclaration> solve(AnnotationExpr annotationExpr) {
Context context = JavaParserFactory.getContext(annotationExpr, typeSolver);
SymbolReference<ResolvedTypeDeclaration> typeDeclarationSymbolReference = context.solveType(annotationExpr.getNameAsString());
if (typeDeclarationSymbolReference.isSolved()) {
ResolvedAnnotationDeclaration annotationDeclaration = (ResolvedAnnotationDeclaration) typeDeclarationSymbolReference.getCorrespondingDeclaration();
return solved(annotationDeclaration);
} else {
return unsolved(ResolvedAnnotationDeclaration.class);
}
}
public SymbolReference<ResolvedValueDeclaration> solve(FieldAccessExpr fieldAccessExpr) {
return ((FieldAccessContext) JavaParserFactory.getContext(fieldAccessExpr, typeSolver)).solveField(fieldAccessExpr.getName().getId());
}
/**
* Get the type associated with the node.
* <p>
* This method was originally intended to get the type of a value: any value has a type.
* <p>
* For example:
* <pre>
* int foo(int a) {
* return a; // when getType is invoked on "a" it returns the type "int"
* }
* </pre>
* <p>
* Now, users have also started using it on names of types themselves, which do not have a type.
* <p>
* For example:
* <pre>
* class A {
* int foo(int a) {
* return A.someStaticField; // when getType is invoked on "A", which represents a class, it returns
* // the type "A" itself while it used to throw UnsolvedSymbolException
* }
* </pre>
* <p>
* To accommodate this usage and avoid confusion, this method returns
* the type itself when used on the name of a type.
*/
public ResolvedType getType(Node node) {
try {
return getType(node, true);
} catch (UnsolvedSymbolException e) {
if (node instanceof NameExpr) {
NameExpr nameExpr = (NameExpr) node;
SymbolReference<ResolvedTypeDeclaration> typeDeclaration = JavaParserFactory.getContext(node, typeSolver)
.solveType(nameExpr.getNameAsString());
if (typeDeclaration.isSolved() && typeDeclaration.getCorrespondingDeclaration() instanceof ResolvedReferenceTypeDeclaration) {
ResolvedReferenceTypeDeclaration resolvedReferenceTypeDeclaration = (ResolvedReferenceTypeDeclaration) typeDeclaration.getCorrespondingDeclaration();
return ReferenceTypeImpl.undeterminedParameters(resolvedReferenceTypeDeclaration, typeSolver);
}
}
throw e;
}
}
public ResolvedType getType(Node node, boolean solveLambdas) {
if (solveLambdas) {
if (!node.containsData(TYPE_WITH_LAMBDAS_RESOLVED)) {
ResolvedType res = getTypeConcrete(node, solveLambdas);
node.setData(TYPE_WITH_LAMBDAS_RESOLVED, res);
boolean secondPassNecessary = false;
if (node instanceof MethodCallExpr) {
MethodCallExpr methodCallExpr = (MethodCallExpr) node;
for (Node arg : methodCallExpr.getArguments()) {
if (!arg.containsData(TYPE_WITH_LAMBDAS_RESOLVED)) {
getType(arg, true);
secondPassNecessary = true;
}
}
}
if (secondPassNecessary) {
node.removeData(TYPE_WITH_LAMBDAS_RESOLVED);
ResolvedType type = getType(node, true);
node.setData(TYPE_WITH_LAMBDAS_RESOLVED, type);
}
Log.trace("getType on %s -> %s", () -> node, () -> res);
}
return node.getData(TYPE_WITH_LAMBDAS_RESOLVED);
} else {
Optional<ResolvedType> res = find(TYPE_WITH_LAMBDAS_RESOLVED, node);
if (res.isPresent()) {
return res.get();
}
res = find(TYPE_WITHOUT_LAMBDAS_RESOLVED, node);
if (!res.isPresent()) {
ResolvedType resType = getTypeConcrete(node, solveLambdas);
node.setData(TYPE_WITHOUT_LAMBDAS_RESOLVED, resType);
Optional<ResolvedType> finalRes = res;
Log.trace("getType on %s (no solveLambdas) -> %s", () -> node, () -> finalRes);
return resType;
}
return res.get();
}
}
private Optional<ResolvedType> find(DataKey<ResolvedType> dataKey, Node node) {
if (node.containsData(dataKey)) {
return Optional.of(node.getData(dataKey));
}
return Optional.empty();
}
protected MethodUsage toMethodUsage(MethodReferenceExpr methodReferenceExpr, List<ResolvedType> paramTypes) {
Expression scope = methodReferenceExpr.getScope();
ResolvedType typeOfScope = getType(methodReferenceExpr.getScope());
if (!typeOfScope.isReferenceType()) {
throw new UnsupportedOperationException(typeOfScope.getClass().getCanonicalName());
}
Optional<MethodUsage> result;
Set<MethodUsage> allMethods = typeOfScope.asReferenceType().getTypeDeclaration()
.orElseThrow(() -> new RuntimeException("TypeDeclaration unexpectedly empty."))
.getAllMethods();
if (scope instanceof TypeExpr) {
// static methods should match all params
List<MethodUsage> staticMethodUsages = allMethods.stream()
.filter(it -> it.getDeclaration().isStatic())
.collect(Collectors.toList());
result = MethodResolutionLogic.findMostApplicableUsage(staticMethodUsages, methodReferenceExpr.getIdentifier(), paramTypes, typeSolver);
if (!paramTypes.isEmpty()) {
// instance methods are called on the first param and should match all other params
List<MethodUsage> instanceMethodUsages = allMethods.stream()
.filter(it -> !it.getDeclaration().isStatic())
.collect(Collectors.toList());
List<ResolvedType> instanceMethodParamTypes = new ArrayList<>(paramTypes);
instanceMethodParamTypes.remove(0); // remove the first one
Optional<MethodUsage> instanceResult = MethodResolutionLogic.findMostApplicableUsage(
instanceMethodUsages, methodReferenceExpr.getIdentifier(), instanceMethodParamTypes, typeSolver);
if (result.isPresent() && instanceResult.isPresent()) {
throw new MethodAmbiguityException("Ambiguous method call: cannot find a most applicable method for " + methodReferenceExpr.getIdentifier());
}
if (instanceResult.isPresent()) {
result = instanceResult;
}
}
} else {
result = MethodResolutionLogic.findMostApplicableUsage(new ArrayList<>(allMethods), methodReferenceExpr.getIdentifier(), paramTypes, typeSolver);
if (result.isPresent() && result.get().getDeclaration().isStatic()) {
throw new RuntimeException("Invalid static method reference " + methodReferenceExpr.getIdentifier());
}
}
if (!result.isPresent()) {
throw new UnsupportedOperationException();
}
return result.get();
}
protected ResolvedType getBinaryTypeConcrete(Node left, Node right, boolean solveLambdas, BinaryExpr.Operator operator) {
ResolvedType leftType = getTypeConcrete(left, solveLambdas);
ResolvedType rightType = getTypeConcrete(right, solveLambdas);
// JLS 15.18.1. String Concatenation Operator +
// If only one operand expression is of type String, then string conversion (§5.1.11) is performed on the other
// operand to produce a string at run time.
//
// The result of string concatenation is a reference to a String object that is the concatenation of the two
// operand strings. The characters of the left-hand operand precede the characters of the right-hand operand in
// the newly created string.
if (operator == BinaryExpr.Operator.PLUS) {
boolean isLeftString = leftType.isReferenceType() && leftType.asReferenceType()
.getQualifiedName().equals(String.class.getCanonicalName());
boolean isRightString = rightType.isReferenceType() && rightType.asReferenceType()
.getQualifiedName().equals(String.class.getCanonicalName());
if (isLeftString || isRightString) {
return isLeftString ? leftType : rightType;
}
}
// JLS 5.6.2. Binary Numeric Promotion
//
// Widening primitive conversion (§5.1.2) is applied to convert either or both operands as specified by the
// following rules:
//
// * If either operand is of type double, the other is converted to double.
// * Otherwise, if either operand is of type float, the other is converted to float.
// * Otherwise, if either operand is of type long, the other is converted to long.
// * Otherwise, both operands are converted to type int.
boolean isLeftNumeric = leftType.isPrimitive() && leftType.asPrimitive().isNumeric();
boolean isRightNumeric = rightType.isPrimitive() && rightType.asPrimitive().isNumeric();
if (isLeftNumeric && isRightNumeric) {
return leftType.asPrimitive().bnp(rightType.asPrimitive());
}
if (rightType.isAssignableBy(leftType)) {
return rightType;
}
return leftType;
}
/**
* Should return more like a TypeApplication: a TypeDeclaration and possible typeParametersValues or array
* modifiers.
*/
private ResolvedType getTypeConcrete(Node node, boolean solveLambdas) {
if (node == null) throw new IllegalArgumentException();
return node.accept(typeExtractor, solveLambdas);
}
/**
* Where a node has an interface/class/enum declaration as its ancestor, return the nearest one.
* <p>
* NOTE: See {@link #findContainingTypeDeclOrObjectCreationExpr} if wanting to include anonymous inner classes.
* <p>
* For example, these all return X:
* {@code public interface X { ... node here ... }}
* {@code public class X { ... node here ... }}
* {@code public enum X { ... node here ... }}
*
* @param node The Node whose ancestors will be traversed,
* @return The first class/interface/enum declaration in the Node's ancestry.
*/
protected TypeDeclaration<?> findContainingTypeDecl(Node node) {
if (node instanceof ClassOrInterfaceDeclaration) {
return (ClassOrInterfaceDeclaration) node;
}
if (node instanceof EnumDeclaration) {
return (EnumDeclaration) node;
}
return findContainingTypeDecl(demandParentNode(node));
}
/**
* Where a node has an interface/class/enum declaration -- or an object creation expression (anonymous inner class)
* -- as its ancestor, return the nearest one.
* <p>
* NOTE: See {@link #findContainingTypeDecl} if wanting to not include anonymous inner classes.
* <p>
* For example, these all return X:
* <ul>
* <li>{@code public interface X { ... node here ... }}</li>
* <li>{@code public class X { ... node here ... }}</li>
* <li>{@code public enum X { ... node here ... }}</li>
* <li><pre>{@code
* new ActionListener() {
* ... node here ...
* public void actionPerformed(ActionEvent e) {
* ... or node here ...
* }
* }
* }</pre></li>
* </ul>
* <p>
*
* @param node The Node whose ancestors will be traversed,
* @return The first class/interface/enum declaration -- or object creation expression (anonymous inner class) -- in
* the Node's ancestry.
*/
protected Node findContainingTypeDeclOrObjectCreationExpr(Node node) {
if (node instanceof ClassOrInterfaceDeclaration) {
return node;
}
if (node instanceof EnumDeclaration) {
return node;
}
Node parent = demandParentNode(node);
if (parent instanceof ObjectCreationExpr && !((ObjectCreationExpr) parent).getArguments().contains(node)) {
return parent;
}
return findContainingTypeDeclOrObjectCreationExpr(parent);
}
/**
* Where a node has an interface/class/enum declaration -- or an object creation expression in an inner class
* references an outer class -- as its ancestor, return the declaration corresponding to the class name specified.
*/
protected Node findContainingTypeDeclOrObjectCreationExpr(Node node, String className) {
if (node instanceof ClassOrInterfaceDeclaration && ((ClassOrInterfaceDeclaration) node).getFullyQualifiedName().get().equals(className)) {
return node;
}
if (node instanceof EnumDeclaration) {
return node;
}
Node parent = demandParentNode(node);
if (parent instanceof ObjectCreationExpr && !((ObjectCreationExpr) parent).getArguments().contains(node)) {
return parent;
}
return findContainingTypeDeclOrObjectCreationExpr(parent, className);
}
public ResolvedType convertToUsageVariableType(VariableDeclarator var) {
return get(typeSolver).convertToUsage(var.getType(), var);
}
public ResolvedType convertToUsage(Type type, Node context) {
if (type.isUnknownType()) {
throw new IllegalArgumentException("Inferred lambda parameter type");
}
return convertToUsage(type, JavaParserFactory.getContext(context, typeSolver));
}
public ResolvedType convertToUsage(Type type) {
return convertToUsage(type, type);
}
// This is a hack around an issue in JavaParser
private String qName(ClassOrInterfaceType classOrInterfaceType) {
String name = classOrInterfaceType.getName().getId();
if (classOrInterfaceType.getScope().isPresent()) {
return qName(classOrInterfaceType.getScope().get()) + "." + name;
}
return name;
}
protected ResolvedType convertToUsage(Type type, Context context) {
if (context == null) {
throw new NullPointerException("Context should not be null");
}
if (type instanceof ClassOrInterfaceType) {
ClassOrInterfaceType classOrInterfaceType = (ClassOrInterfaceType) type;
String name = qName(classOrInterfaceType);
SymbolReference<ResolvedTypeDeclaration> ref = context.solveType(name);
if (!ref.isSolved()) {
throw new UnsolvedSymbolException(name);
}
ResolvedTypeDeclaration typeDeclaration = ref.getCorrespondingDeclaration();
List<ResolvedType> typeParameters = Collections.emptyList();
if (classOrInterfaceType.getTypeArguments().isPresent()) {
typeParameters = classOrInterfaceType.getTypeArguments().get().stream().map((pt) -> convertToUsage(pt, context)).collect(Collectors.toList());
}
if (typeDeclaration.isTypeParameter()) {
if (typeDeclaration instanceof ResolvedTypeParameterDeclaration) {
return new ResolvedTypeVariable((ResolvedTypeParameterDeclaration) typeDeclaration);
} else {
JavaParserTypeVariableDeclaration javaParserTypeVariableDeclaration = (JavaParserTypeVariableDeclaration) typeDeclaration;
return new ResolvedTypeVariable(javaParserTypeVariableDeclaration.asTypeParameter());
}
} else {
return new ReferenceTypeImpl((ResolvedReferenceTypeDeclaration) typeDeclaration, typeParameters, typeSolver);
}
} else if (type instanceof PrimitiveType) {
return ResolvedPrimitiveType.byName(((PrimitiveType) type).getType().name());
} else if (type instanceof WildcardType) {
WildcardType wildcardType = (WildcardType) type;
if (wildcardType.getExtendedType().isPresent() && !wildcardType.getSuperType().isPresent()) {
return ResolvedWildcard.extendsBound(convertToUsage(wildcardType.getExtendedType().get(), context)); // removed (ReferenceTypeImpl)
} else if (!wildcardType.getExtendedType().isPresent() && wildcardType.getSuperType().isPresent()) {
return ResolvedWildcard.superBound(convertToUsage(wildcardType.getSuperType().get(), context)); // removed (ReferenceTypeImpl)
} else if (!wildcardType.getExtendedType().isPresent() && !wildcardType.getSuperType().isPresent()) {
return ResolvedWildcard.UNBOUNDED;
} else {
throw new UnsupportedOperationException(wildcardType.toString());
}
} else if (type instanceof VoidType) {
return ResolvedVoidType.INSTANCE;
} else if (type instanceof ArrayType) {
ArrayType jpArrayType = (ArrayType) type;
return new ResolvedArrayType(convertToUsage(jpArrayType.getComponentType(), context));
} else if (type instanceof UnionType) {
UnionType unionType = (UnionType) type;
return new ResolvedUnionType(unionType.getElements().stream().map(el -> convertToUsage(el, context)).collect(Collectors.toList()));
} else if (type instanceof VarType) {
Node parent = type.getParentNode().get();
if (!(parent instanceof VariableDeclarator)) {
throw new IllegalStateException("Trying to resolve a `var` which is not in a variable declaration.");
}
final VariableDeclarator variableDeclarator = (VariableDeclarator) parent;
return variableDeclarator.getInitializer()
.map(Expression::calculateResolvedType)
.orElseThrow(() -> new IllegalStateException("Cannot resolve `var` which has no initializer."));
} else {
throw new UnsupportedOperationException(type.getClass().getCanonicalName());
}
}
public ResolvedType convert(Type type, Node node) {
return convert(type, JavaParserFactory.getContext(node, typeSolver));
}
public ResolvedType convert(Type type, Context context) {
return convertToUsage(type, context);
}
public MethodUsage solveMethodAsUsage(MethodCallExpr call) {
List<ResolvedType> params = new ArrayList<>();
if (call.getArguments() != null) {
for (Expression param : call.getArguments()) {
//getTypeConcrete(Node node, boolean solveLambdas)
try {
params.add(getType(param, false));
} catch (Exception e) {
throw new RuntimeException(String.format("Error calculating the type of parameter %s of method call %s", param, call), e);
}
//params.add(getTypeConcrete(param, false));
}
}
Context context = JavaParserFactory.getContext(call, typeSolver);
Optional<MethodUsage> methodUsage = context.solveMethodAsUsage(call.getName().getId(), params);
if (!methodUsage.isPresent()) {
throw new RuntimeException("Method '" + call.getName() + "' cannot be resolved in context "
+ call + " (line: " + call.getRange().map(r -> "" + r.begin.line).orElse("??") + ") " + context + ". Parameter types: " + params);
}
return methodUsage.get();
}
public ResolvedReferenceTypeDeclaration getTypeDeclaration(Node node) {
if (node instanceof TypeDeclaration) {
return getTypeDeclaration((TypeDeclaration) node);
} else if (node instanceof ObjectCreationExpr) {
return new JavaParserAnonymousClassDeclaration((ObjectCreationExpr) node, typeSolver);
} else {
throw new IllegalArgumentException();
}
}
public ResolvedReferenceTypeDeclaration getTypeDeclaration(ClassOrInterfaceDeclaration classOrInterfaceDeclaration) {
return JavaParserFactory.toTypeDeclaration(classOrInterfaceDeclaration, typeSolver);
}
/**
* "this" inserted in the given point, which type would have?
*/
public ResolvedType getTypeOfThisIn(Node node) {
// TODO consider static methods
if (node instanceof ClassOrInterfaceDeclaration) {
return new ReferenceTypeImpl(getTypeDeclaration((ClassOrInterfaceDeclaration) node), typeSolver);
} else if (node instanceof EnumDeclaration) {
JavaParserEnumDeclaration enumDeclaration = new JavaParserEnumDeclaration((EnumDeclaration) node, typeSolver);
return new ReferenceTypeImpl(enumDeclaration, typeSolver);
} else if (node instanceof ObjectCreationExpr && ((ObjectCreationExpr) node).getAnonymousClassBody().isPresent()) {
JavaParserAnonymousClassDeclaration anonymousDeclaration = new JavaParserAnonymousClassDeclaration((ObjectCreationExpr) node, typeSolver);
return new ReferenceTypeImpl(anonymousDeclaration, typeSolver);
}
return getTypeOfThisIn(demandParentNode(node));
}
public ResolvedReferenceTypeDeclaration getTypeDeclaration(TypeDeclaration<?> typeDeclaration) {
return JavaParserFactory.toTypeDeclaration(typeDeclaration, typeSolver);
}
public ResolvedType classToResolvedType(Class<?> clazz) {
if (clazz.isPrimitive()) {
return ResolvedPrimitiveType.byName(clazz.getName());
}
return new ReferenceTypeImpl(new ReflectionClassDeclaration(clazz, typeSolver), typeSolver);
}
}
| 1 | 14,342 | I was thinking about this for #2928 too, which also uses `endsWith`... Presumably `SomeOtherObject` would match `endsWith("Object")`, meaning that something more sophisticated like splitting it then iterating right to left would be needed in order to do this robustly? | javaparser-javaparser | java |
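One possible shape of the more robust match hinted at in this review (a sketch only, not the project's actual fix; the helper name is invented). Comparing trailing name segments instead of raw string suffixes keeps "SomeOtherObject" from matching a query for "Object":

// Compares the last segments of a fully qualified name against the segments
// of the requested class name, iterating right to left as the review suggests.
private static boolean matchesTrailingSegments(String fullyQualifiedName, String className) {
    String[] fqnParts = fullyQualifiedName.split("\\.");
    String[] queryParts = className.split("\\.");
    if (queryParts.length > fqnParts.length) {
        return false;
    }
    for (int i = 1; i <= queryParts.length; i++) {
        if (!fqnParts[fqnParts.length - i].equals(queryParts[queryParts.length - i])) {
            return false;
        }
    }
    return true;
}

The instanceof branch in findContainingTypeDeclOrObjectCreationExpr could then call such a helper instead of endsWith(), so a match only happens on a whole-segment boundary.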
@@ -51,5 +51,15 @@ namespace OpenTelemetry.Exporter.Zipkin
/// </summary>
public int? MaxPayloadSizeInBytes { get; set; } = DefaultMaxPayloadSizeInBytes;
#endif
+
+ /// <summary>
+ /// Gets or sets the exporter type for Zipkin Exporter.
+ /// </summary>
+ public ExporterType ExporterType { get; set; } = ExporterType.BatchExportProcessor;
+
+ /// <summary>
+ /// Gets or sets the BatchExportProcessor options.
+ /// </summary>
+ public BatchExportProcessorOptions BatchExportProcessorOptions { get; set; } = new BatchExportProcessorOptions();
}
} | 1 | // <copyright file="ZipkinExporterOptions.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
namespace OpenTelemetry.Exporter.Zipkin
{
/// <summary>
/// Zipkin trace exporter options.
/// </summary>
public sealed class ZipkinExporterOptions
{
internal const string DefaultServiceName = "OpenTelemetry Exporter";
#if !NET452
internal const int DefaultMaxPayloadSizeInBytes = 4096;
#endif
/// <summary>
/// Gets or sets the name of the service reporting telemetry.
/// </summary>
public string ServiceName { get; set; } = DefaultServiceName;
/// <summary>
/// Gets or sets Zipkin endpoint address. See https://zipkin.io/zipkin-api/#/default/post_spans.
/// Typically https://zipkin-server-name:9411/api/v2/spans.
/// </summary>
public Uri Endpoint { get; set; } = new Uri("http://localhost:9411/api/v2/spans");
/// <summary>
/// Gets or sets a value indicating whether short trace id should be used.
/// </summary>
public bool UseShortTraceIds { get; set; }
#if !NET452
/// <summary>
/// Gets or sets the maximum payload size in bytes. Default value: 4096.
/// </summary>
public int? MaxPayloadSizeInBytes { get; set; } = DefaultMaxPayloadSizeInBytes;
#endif
}
}
| 1 | 18,070 | I think this name is a bit confusing because if you asked someone what type of exporter they were using they would probably say Zipkin or Jaeger, etc. How about `ProcessorType` with `Simple` and `Batch` definitions? | open-telemetry-opentelemetry-dotnet | .cs |
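A sketch of the naming floated in this review (hypothetical; it does not claim to be the shape the library eventually shipped): the enum describes the processor rather than the exporter.

/// <summary>
/// Describes which export processor the exporter should be wired up with.
/// </summary>
public enum ProcessorType
{
    Simple,
    Batch,
}

Usage on the options class would then read, for example, options.ProcessorType = ProcessorType.Batch;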
@@ -95,6 +95,12 @@ func (a *FakeWebAPI) RegisterPiped(ctx context.Context, req *webservice.Register
}, nil
}
+func (a *FakeWebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) {
+ return &webservice.RecreatePipedKeyResponse{
+ Key: "9bf9752a-54a2-451a-a541-444add56f96b",
+ }, nil
+}
+
func (a *FakeWebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
fakeProjectID = "debug-project"
)
// FakeWebAPI implements the fake behaviors for the gRPC definitions of WebAPI.
type FakeWebAPI struct {
}
// NewFakeWebAPI creates a new FakeWebAPI instance.
func NewFakeWebAPI() *FakeWebAPI {
return &FakeWebAPI{}
}
// Register registers all handling of this service into the specified gRPC server.
func (a *FakeWebAPI) Register(server *grpc.Server) {
webservice.RegisterWebServiceServer(server, a)
}
func (a *FakeWebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) {
now := time.Now()
envs := []*model.Environment{
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
Name: "development",
Desc: "For development",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "staging"),
Name: "staging",
Desc: "For staging",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "production"),
Name: "production",
Desc: "For production",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
return &webservice.ListEnvironmentsResponse{
Environments: envs,
}, nil
}
func (a *FakeWebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) {
return &webservice.RegisterPipedResponse{
Id: "e357d99f-0f83-4ce0-8c8b-27f11f432ef9",
Key: "9bf9752a-54a2-451a-a541-444add56f96b",
}, nil
}
func (a *FakeWebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) {
now := time.Now()
pipeds := []*webservice.Piped{
{
Id: "492220b1-c080-4781-9e55-7e278760e0ef",
Desc: "piped for debug 1",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "bdd71c9e-5406-46fb-a0e4-b2124ea1c1ea",
Desc: "piped for debug 2",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "42e9fa90-22c1-4436-b10c-094044329c27",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
if req.WithStatus {
pipeds[0].Status = webservice.PipedConnectionStatus_PIPED_CONNECTION_ONLINE
pipeds[1].Status = webservice.PipedConnectionStatus_PIPED_CONNECTION_ONLINE
pipeds[2].Status = webservice.PipedConnectionStatus_PIPED_CONNECTION_OFFLINE
}
return &webservice.ListPipedsResponse{
Pipeds: pipeds,
}, nil
}
func (a *FakeWebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) {
now := time.Now()
return &webservice.GetPipedResponse{
Piped: &webservice.Piped{
Id: "492220b1-c080-4781-9e55-7e278760e0ef",
Desc: "piped for debug 1",
ProjectId: fakeProjectID,
Version: "debug-version",
StartedAt: now.Add(-30 * time.Minute).Unix(),
CloudProviders: []*model.Piped_CloudProvider{
{
Name: "kubernetes-default",
Type: model.CloudProviderKubernetes.String(),
},
},
RepositoryIds: []string{
"piped-repo-1",
"piped-repo-2",
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}, nil
}
func (a *FakeWebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) {
return &webservice.AddApplicationResponse{}, nil
}
func (a *FakeWebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) {
return &webservice.EnableApplicationResponse{}, nil
}
func (a *FakeWebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) {
return &webservice.DisableApplicationResponse{}, nil
}
func (a *FakeWebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) {
now := time.Now()
fakeApplications := []*model.Application{
{
Id: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
Name: "debug-app",
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
CloudProvider: "kubernetes-default",
MostRecentlySuccessfulDeployment: &model.ApplicationDeploymentReference{
DeploymentId: "debug-deployment-id-01",
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Unix(),
},
Commander: "",
Timestamp: now.Unix(),
}, Version: "v0.1.0",
StartedAt: now.Add(-3 * 24 * time.Hour).Unix(),
CompletedAt: now.Add(-3 * 24 * time.Hour).Unix(),
},
SyncState: &model.ApplicationSyncState{
Status: model.ApplicationSyncStatus_SYNCED,
ShortReason: "Short resson",
Reason: "Reason",
HeadDeploymentId: "debug-deployment-id-01",
Timestamp: now.Unix(),
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
return &webservice.ListApplicationsResponse{
Applications: fakeApplications,
}, nil
}
func (a *FakeWebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) {
return &webservice.SyncApplicationResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) {
now := time.Now()
application := model.Application{
Id: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
Name: "debug-app",
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
CloudProvider: "kubernetes-default",
MostRecentlySuccessfulDeployment: &model.ApplicationDeploymentReference{
DeploymentId: "debug-deployment-id-01",
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Unix(),
},
Commander: "",
Timestamp: now.Unix(),
},
Version: "v0.1.0",
StartedAt: now.Add(-3 * 24 * time.Hour).Unix(),
CompletedAt: now.Add(-3 * 24 * time.Hour).Unix(),
},
SyncState: &model.ApplicationSyncState{
Status: model.ApplicationSyncStatus_SYNCED,
ShortReason: "Short resson",
Reason: "Reason",
HeadDeploymentId: "debug-deployment-id-01",
Timestamp: now.Unix(),
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetApplicationResponse{
Application: &application,
}, nil
}
func (a *FakeWebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) {
now := time.Now()
deploymentTime := now
fakeDeployments := make([]*model.Deployment, 15)
for i := 0; i < 15; i++ {
// 5 hour intervals
deploymentTime := deploymentTime.Add(time.Duration(-5*i) * time.Hour)
fakeDeployments[i] = &model.Deployment{
Id: fmt.Sprintf("debug-deployment-id-%02d", i),
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: deploymentTime.Unix(),
},
Commander: "",
Timestamp: deploymentTime.Unix(),
},
RunningCommitHash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Description: fmt.Sprintf("This deployment is debug-%02d", i),
Status: model.DeploymentStatus_DEPLOYMENT_SUCCESS,
Stages: []*model.PipelineStage{
{
Id: "fake-stage-id-0-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_RUNNING,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-1",
Name: model.StageK8sPrimaryRollout.String(),
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-2",
Name: model.StageK8sCanaryRollout.String(),
Index: 2,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_FAILURE,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-0",
Name: model.StageK8sCanaryClean.String(),
Desc: "waiting approval",
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-1",
Name: model.StageK8sCanaryClean.String(),
Desc: "approved by cakecatz",
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-3-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-2-0",
"fake-stage-id-2-1",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
CreatedAt: deploymentTime.Unix(),
UpdatedAt: deploymentTime.Unix(),
}
}
return &webservice.ListDeploymentsResponse{
Deployments: fakeDeployments,
}, nil
}
func (a *FakeWebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) {
now := time.Now()
resp := &model.Deployment{
Id: "debug-deployment-id-01",
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Add(-30 * time.Minute).Unix(),
},
Commander: "cakecatz",
Timestamp: now.Add(-30 * time.Minute).Unix(),
},
RunningCommitHash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Description: "This deployment is debug",
Status: model.DeploymentStatus_DEPLOYMENT_RUNNING,
Stages: []*model.PipelineStage{
{
Id: "fake-stage-id-0-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_RUNNING,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-1",
Name: model.StageK8sPrimaryRollout.String(),
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-2",
Name: model.StageK8sCanaryRollout.String(),
Index: 2,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_FAILURE,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-0",
Name: model.StageK8sCanaryClean.String(),
Desc: "waiting approval",
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-1",
Name: model.StageK8sCanaryClean.String(),
Desc: "approved by cakecatz",
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-3-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-2-0",
"fake-stage-id-2-1",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetDeploymentResponse{
Deployment: resp,
}, nil
}
func (a *FakeWebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) {
startTime := time.Now().Add(-10 * time.Minute)
resp := []*model.LogBlock{
{
Index: 1,
Log: "+ make build",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Unix(),
},
{
Index: 2,
Log: "bazelisk --output_base=/workspace/bazel_out build --config=ci -- //...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(5 * time.Second).Unix(),
},
{
Index: 3,
Log: "2020/06/01 08:52:07 Downloading https://releases.bazel.build/3.1.0/release/bazel-3.1.0-linux-x86_64...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(10 * time.Second).Unix(),
},
{
Index: 4,
Log: "Extracting Bazel installation...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(15 * time.Second).Unix(),
},
{
Index: 5,
Log: "Starting local Bazel server and connecting to it...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(20 * time.Second).Unix(),
},
{
Index: 6,
Log: "(08:52:14) Loading: 0 packages loaded",
Severity: model.LogSeverity_SUCCESS,
CreatedAt: startTime.Add(30 * time.Second).Unix(),
},
{
Index: 7,
Log: "(08:53:21) Analyzing: 157 targets (88 packages loaded, 0 targets configured)",
Severity: model.LogSeverity_SUCCESS,
CreatedAt: startTime.Add(35 * time.Second).Unix(),
},
{
Index: 8,
Log: "Error: Error building: logged 2 error(s)",
Severity: model.LogSeverity_ERROR,
CreatedAt: startTime.Add(45 * time.Second).Unix(),
},
}
return &webservice.GetStageLogResponse{
Blocks: resp,
}, nil
}
func (a *FakeWebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) {
return &webservice.CancelDeploymentResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) {
return &webservice.ApproveStageResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) {
now := time.Now()
snapshot := &model.ApplicationLiveStateSnapshot{
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
Kubernetes: &model.KubernetesApplicationLiveState{
Resources: []*model.KubernetesResourceState{
{
Id: "f2c832a3-1f5b-4982-8f6e-72345ecb3c82",
Name: "demo-application",
ApiVersion: "networking.k8s.io/v1beta1",
Kind: "Ingress",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "8423fb53-5170-4864-a7d2-b84f8d36cb02",
Name: "demo-application",
ApiVersion: "v1",
Kind: "Service",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
Name: "demo-application",
ApiVersion: "apps/v1",
Kind: "Deployment",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "8621f186-6641-4f7a-9be4-5983eb647f8d",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
Name: "demo-application-9504e8601a",
ApiVersion: "apps/v1",
Kind: "ReplicaSet",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "ae5d0031-1f63-4396-b929-fa9987d1e6de",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-7vrdw",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "f55c7891-ba25-44bb-bca4-ffbc16b0089f",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-vlgd5",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "c2a81415-5bbf-44e8-9101-98bbd636bbeb",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-tmwp5",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
},
Version: &model.ApplicationLiveStateVersion{
Index: 1,
Timestamp: now.Unix(),
},
}
return &webservice.GetApplicationLiveStateResponse{
Snapshot: snapshot,
}, nil
}
func (a *FakeWebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) {
now := time.Now()
cmd := model.Command{
Id: uuid.New().String(),
PipedId: "debug-piped",
ApplicationId: "debug-application-id",
DeploymentId: "debug-deployment-id",
Commander: "anonymous",
Status: model.CommandStatus_COMMAND_NOT_HANDLED_YET,
Type: model.Command_CANCEL_DEPLOYMENT,
CancelDeployment: &model.Command_CancelDeployment{
DeploymentId: "debug-deployment-id-01",
},
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetCommandResponse{
Command: &cmd,
}, nil
}
| 1 | 8,304 | `req` is unused in RecreatePipedKey | pipe-cd-pipe | go |
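The review note above flags an unused `req` parameter. In Go, a handler method usually has to keep that parameter to satisfy its service interface, and the common fix is to rename it to `_` so the non-use is explicit. A minimal sketch of that idiom follows; the receiver, method, and request/response types here are hypothetical stand-ins, not part of the pipe-cd API:

package fakeapi

import "context"

// pingRequest and pingResponse are illustrative types only.
type pingRequest struct{}
type pingResponse struct{ Message string }

type fakeAPI struct{}

// Ping keeps both parameters so the signature still matches the (assumed)
// service interface, but names the unused request "_" to make the
// intentional non-use explicit.
func (a *fakeAPI) Ping(_ context.Context, _ *pingRequest) (*pingResponse, error) {
	return &pingResponse{Message: "pong"}, nil
}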
@@ -85,11 +85,11 @@ export default function SetupAccountApproved() {
__( 'Site Kit detected AdSense code for a different account %s on your site. For a better ads experience, you should remove AdSense code that’s not linked to this AdSense account.', 'google-site-kit' ),
parseAccountID( existingTag )
);
- uncheckedMessage = __( 'By not placing the code, AdSense will not show ads on your website unless you’ve already got some AdSense code.', 'google-site-kit' );
+ uncheckedMessage = __( 'Please note that Adsense will not show ads on your website unless you’ve already placed the code.', 'google-site-kit' );
} else {
// No existing tag.
showProfile = false;
- uncheckedMessage = __( 'By not placing the code, AdSense will not show ads on your website unless you’ve already got some AdSense code.', 'google-site-kit' );
+ uncheckedMessage = __( 'Please note that Adsense will not show ads on your website unless you’ve already placed the code.', 'google-site-kit' );
}
return ( | 1 | /**
* AdSense Setup Account Approved component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { Fragment, useCallback } from '@wordpress/element';
import { __, sprintf } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import Button from '../../../../components/Button';
import { STORE_NAME } from '../../datastore/constants';
import { parseAccountID } from '../../util/parsing';
import { ACCOUNT_STATUS_APPROVED } from '../../util/status';
import {
ErrorNotices,
UserProfile,
UseSnippetSwitch,
} from '../common';
const { useSelect, useDispatch } = Data;
export default function SetupAccountApproved() {
const existingTag = useSelect( ( select ) => select( STORE_NAME ).getExistingTag() );
const hasExistingTagPermission = useSelect( ( select ) => select( STORE_NAME ).hasExistingTagPermission() );
const originalAccountStatus = useSelect( ( select ) => select( STORE_NAME ).getOriginalAccountStatus() );
const isDoingSubmitChanges = useSelect( ( select ) => select( STORE_NAME ).isDoingSubmitChanges() );
const { completeAccountSetup } = useDispatch( STORE_NAME );
const continueHandler = useCallback( async () => {
if ( isDoingSubmitChanges ) {
return;
}
await completeAccountSetup();
}, [ isDoingSubmitChanges ] );
if ( undefined === existingTag || undefined === originalAccountStatus ) {
return null;
}
// Depending on whether the user's AdSense account was already approved
// before setting up the module in Site Kit or not, different wording
// needs to be used. This can be determined by checking the previously
// saved account status. If the previous value is the approved state or
// nothing, we know the account had already been approved.
const isApprovedFromVeryBeginning = '' === originalAccountStatus || ACCOUNT_STATUS_APPROVED === originalAccountStatus;
let label;
if ( isApprovedFromVeryBeginning ) {
label = __( 'Let Site Kit place AdSense code on your site to get your site approved', 'google-site-kit' );
} else {
label = __( 'Keep AdSense code placed by Site Kit', 'google-site-kit' );
}
let showProfile;
let checkedMessage;
let uncheckedMessage;
if ( existingTag && hasExistingTagPermission ) {
// Existing tag with permission.
showProfile = false;
checkedMessage = __( 'You’ve already got an AdSense code on your site for this account, we recommend you use Site Kit to place code to get the most out of AdSense.', 'google-site-kit' );
uncheckedMessage = checkedMessage;
} else if ( existingTag ) {
// Existing tag without permission.
showProfile = true;
checkedMessage = sprintf(
/* translators: %s: account ID */
__( 'Site Kit detected AdSense code for a different account %s on your site. For a better ads experience, you should remove AdSense code that’s not linked to this AdSense account.', 'google-site-kit' ),
parseAccountID( existingTag )
);
uncheckedMessage = __( 'By not placing the code, AdSense will not show ads on your website unless you’ve already got some AdSense code.', 'google-site-kit' );
} else {
// No existing tag.
showProfile = false;
uncheckedMessage = __( 'By not placing the code, AdSense will not show ads on your website unless you’ve already got some AdSense code.', 'google-site-kit' );
}
return (
<Fragment>
<h3 className="googlesitekit-heading-4 googlesitekit-setup-module__title">
{ isApprovedFromVeryBeginning &&
__( 'Looks like you’re already using AdSense', 'google-site-kit' )
}
{ ! isApprovedFromVeryBeginning &&
__( 'Your account is ready to use AdSense', 'google-site-kit' )
}
</h3>
<ErrorNotices />
<p>
{ isApprovedFromVeryBeginning &&
__( 'Site Kit will place AdSense code on your site to connect your site to AdSense and help you get the most out of ads. This means Google will automatically place ads for you in all the best places.', 'google-site-kit' )
}
{ ! isApprovedFromVeryBeginning &&
__( 'Site Kit has placed AdSense code on your site to connect your site to AdSense and help you get the most out of ads. This means Google will automatically place ads for you in all the best places.', 'google-site-kit' )
}
</p>
{ showProfile && <UserProfile /> }
<UseSnippetSwitch
label={ label }
checkedMessage={ checkedMessage }
uncheckedMessage={ uncheckedMessage }
saveOnChange={ true /* Save setting right when toggling. */ }
/>
<div className="googlesitekit-setup-module__action">
<Button
onClick={ continueHandler }
disabled={ isDoingSubmitChanges }
>
{ __( 'Continue', 'google-site-kit' ) }
</Button>
</div>
</Fragment>
);
}
| 1 | 37,298 | The "Adsense" term needs to be capitalized - I just noticed this was wrong through ACs and IB, so not a problem of the PR itself really. I'll quickly fix it. | google-site-kit-wp | js |
@@ -45,11 +45,15 @@ func init() {
beam.RegisterFunction(defToDecorPiece)
beam.RegisterFunction(fileToDecorPiece)
beam.RegisterFunction(groupCrossRefs)
+ beam.RegisterFunction(groupEdges)
beam.RegisterFunction(keyByPath)
beam.RegisterFunction(keyRef)
beam.RegisterFunction(moveSourceToKey)
beam.RegisterFunction(nodeToDecorPiece)
+ beam.RegisterFunction(nodeToEdges)
+ beam.RegisterFunction(nodeToReverseEdges)
beam.RegisterFunction(refToDecorPiece)
+ beam.RegisterFunction(reverseEdge)
beam.RegisterFunction(toDefinition)
beam.RegisterFunction(toEnclosingFile)
beam.RegisterFunction(toFiles) | 1 | /*
* Copyright 2018 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pipeline
import (
"fmt"
"reflect"
"sort"
"strconv"
"kythe.io/kythe/go/services/xrefs"
"kythe.io/kythe/go/serving/pipeline/nodes"
"kythe.io/kythe/go/serving/xrefs/assemble"
"kythe.io/kythe/go/util/compare"
"kythe.io/kythe/go/util/kytheuri"
"kythe.io/kythe/go/util/schema"
"kythe.io/kythe/go/util/schema/edges"
"kythe.io/kythe/go/util/schema/facts"
kinds "kythe.io/kythe/go/util/schema/nodes"
"github.com/apache/beam/sdks/go/pkg/beam"
cpb "kythe.io/kythe/proto/common_go_proto"
ppb "kythe.io/kythe/proto/pipeline_go_proto"
scpb "kythe.io/kythe/proto/schema_go_proto"
srvpb "kythe.io/kythe/proto/serving_go_proto"
spb "kythe.io/kythe/proto/storage_go_proto"
)
func init() {
beam.RegisterFunction(defToDecorPiece)
beam.RegisterFunction(fileToDecorPiece)
beam.RegisterFunction(groupCrossRefs)
beam.RegisterFunction(keyByPath)
beam.RegisterFunction(keyRef)
beam.RegisterFunction(moveSourceToKey)
beam.RegisterFunction(nodeToDecorPiece)
beam.RegisterFunction(refToDecorPiece)
beam.RegisterFunction(toDefinition)
beam.RegisterFunction(toEnclosingFile)
beam.RegisterFunction(toFiles)
beam.RegisterFunction(toRefs)
beam.RegisterType(reflect.TypeOf((*combineDecorPieces)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*ticketKey)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*ppb.DecorationPiece)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*ppb.Node)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*ppb.Reference)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*spb.Entry)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*spb.VName)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*srvpb.CorpusRoots)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*srvpb.ExpandedAnchor)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*srvpb.File)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*srvpb.FileDecorations)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*srvpb.FileDirectory)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*srvpb.PagedCrossReferences)(nil)).Elem())
beam.RegisterType(reflect.TypeOf((*srvpb.PagedCrossReferences_Page)(nil)).Elem())
}
// KytheBeam controls the lifetime and generation of PCollections in the Kythe
// pipeline.
type KytheBeam struct {
s beam.Scope
fileVNames beam.PCollection // *spb.VName
nodes beam.PCollection // *ppb.Node
files beam.PCollection // *srvpb.File
refs beam.PCollection // *ppb.Reference
}
// FromNodes creates a KytheBeam pipeline from an input collection of
// *spb.Nodes.
func FromNodes(s beam.Scope, nodes beam.PCollection) *KytheBeam { return &KytheBeam{s: s, nodes: nodes} }
// FromEntries creates a KytheBeam pipeline from an input collection of
// *spb.Entry messages.
func FromEntries(s beam.Scope, entries beam.PCollection) *KytheBeam {
return FromNodes(s, nodes.FromEntries(s, entries))
}
// CrossReferences returns a Kythe file decorations table derived from the Kythe
// input graph. The beam.PCollections have elements of type
// KV<string, *srvpb.PagedCrossReferences> and
// KV<string, *srvpb.PagedCrossReferences_Page>, respectively.
func (k *KytheBeam) CrossReferences() (sets, pages beam.PCollection) {
s := k.s.Scope("CrossReferences")
refs := beam.GroupByKey(s, beam.ParDo(s, keyRef, k.References()))
// TODO(schroederc): related nodes
// TODO(schroederc): callers
// TODO(schroederc): MarkedSource
// TODO(schroederc): source_node
return beam.ParDo2(s, groupCrossRefs, refs)
}
// groupCrossRefs emits *srvpb.PagedCrossReferences and *srvpb.PagedCrossReferences_Pages for a
// single node's collection of *ppb.References.
func groupCrossRefs(key *spb.VName, refStream func(**ppb.Reference) bool, emitSet func(string, *srvpb.PagedCrossReferences), emitPage func(string, *srvpb.PagedCrossReferences_Page)) {
set := &srvpb.PagedCrossReferences{SourceTicket: kytheuri.ToString(key)}
// TODO(schroederc): add paging
groups := make(map[string]*srvpb.PagedCrossReferences_Group)
var ref *ppb.Reference
for refStream(&ref) {
kind := refKind(ref)
g, ok := groups[kind]
if !ok {
g = &srvpb.PagedCrossReferences_Group{Kind: kind}
groups[kind] = g
set.Group = append(set.Group, g)
}
g.Anchor = append(g.Anchor, ref.Anchor)
}
sort.Slice(set.Group, func(i, j int) bool { return set.Group[i].Kind < set.Group[j].Kind })
for _, g := range set.Group {
sort.Slice(g.Anchor, func(i, j int) bool { return g.Anchor[i].Ticket < g.Anchor[j].Ticket })
}
emitSet("xrefs:"+set.SourceTicket, set)
}
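// keyRef re-keys a *ppb.Reference by its source VName so that all references
// to the same node can be grouped together; only the reference's kind and
// anchor are kept in the value.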
func keyRef(r *ppb.Reference) (*spb.VName, *ppb.Reference) {
return r.Source, &ppb.Reference{
Kind: r.Kind,
Anchor: r.Anchor,
}
}
// Decorations returns a Kythe file decorations table derived from the Kythe
// input graph. The beam.PCollection has elements of type
// KV<string, *srvpb.FileDecorations>.
func (k *KytheBeam) Decorations() beam.PCollection {
s := k.s.Scope("Decorations")
targets := beam.ParDo(s, toEnclosingFile, k.References())
bareNodes := beam.ParDo(s, &nodes.Filter{IncludeEdges: []string{}}, k.nodes)
decor := beam.ParDo(s, refToDecorPiece, k.References())
files := beam.ParDo(s, fileToDecorPiece, k.getFiles())
nodes := beam.ParDo(s, nodeToDecorPiece,
beam.CoGroupByKey(s, beam.ParDo(s, moveSourceToKey, bareNodes), targets))
defs := beam.ParDo(s, defToDecorPiece,
beam.CoGroupByKey(s, k.directDefinitions(), targets))
// TODO(schroederc): overrides
// TODO(schroederc): diagnostics
pieces := beam.Flatten(s, decor, files, nodes, defs)
return beam.ParDo(s, &ticketKey{"decor:"}, beam.CombinePerKey(s, &combineDecorPieces{}, pieces))
}
type ticketKey struct{ Prefix string }
func (t *ticketKey) ProcessElement(key *spb.VName, val beam.T) (string, beam.T) {
return t.Prefix + kytheuri.ToString(key), val
}
func toEnclosingFile(r *ppb.Reference) (*spb.VName, *spb.VName, error) {
anchor, err := kytheuri.ToVName(r.Anchor.Ticket)
if err != nil {
return nil, nil, err
}
file := fileVName(anchor)
return r.Source, file, nil
}
// combineDecorPieces combines *ppb.DecorationPieces into a single *srvpb.FileDecorations.
type combineDecorPieces struct{}
func (c *combineDecorPieces) CreateAccumulator() *srvpb.FileDecorations {
return &srvpb.FileDecorations{}
}
func (c *combineDecorPieces) MergeAccumulators(accum, n *srvpb.FileDecorations) *srvpb.FileDecorations {
return accum
}
func (c *combineDecorPieces) AddInput(accum *srvpb.FileDecorations, p *ppb.DecorationPiece) *srvpb.FileDecorations {
switch p := p.Piece.(type) {
case *ppb.DecorationPiece_Reference:
ref := p.Reference
accum.Decoration = append(accum.Decoration, &srvpb.FileDecorations_Decoration{
Anchor: &srvpb.RawAnchor{
StartOffset: ref.Anchor.Span.Start.ByteOffset,
EndOffset: ref.Anchor.Span.End.ByteOffset,
},
Kind: refKind(ref),
Target: kytheuri.ToString(ref.Source),
})
case *ppb.DecorationPiece_File:
accum.File = p.File
case *ppb.DecorationPiece_Node:
node := p.Node
n := &srvpb.Node{Ticket: kytheuri.ToString(node.Source)}
if kind := nodes.Kind(node); kind != "" {
n.Fact = append(n.Fact, &cpb.Fact{
Name: facts.NodeKind,
Value: []byte(kind),
})
}
if subkind := nodes.Subkind(node); subkind != "" {
n.Fact = append(n.Fact, &cpb.Fact{
Name: facts.Subkind,
Value: []byte(subkind),
})
}
for _, f := range node.Fact {
n.Fact = append(n.Fact, &cpb.Fact{
Name: nodes.FactName(f),
Value: f.Value,
})
}
sort.Slice(n.Fact, func(i, j int) bool { return n.Fact[i].Name < n.Fact[j].Name })
accum.Target = append(accum.Target, n)
case *ppb.DecorationPiece_Definition_:
// TODO(schroederc): redesign *srvpb.FileDecorations to not need invasive
// changes to add a node's definition
def := p.Definition
accum.TargetDefinitions = append(accum.TargetDefinitions, def.Definition)
// Add a marker to associate the definition and node. ExtractOutput will
// later embed the definition within accum.Target/accum.TargetOverride.
accum.Target = append(accum.Target, &srvpb.Node{
Ticket: kytheuri.ToString(def.Node),
DefinitionLocation: &srvpb.ExpandedAnchor{Ticket: def.Definition.Ticket},
})
default:
panic(fmt.Errorf("unhandled DecorationPiece: %T", p))
}
return accum
}
func (c *combineDecorPieces) ExtractOutput(fd *srvpb.FileDecorations) *srvpb.FileDecorations {
// Embed definitions for Decorations and Overrides
for i := len(fd.Target) - 1; i >= 0; i-- {
if fd.Target[i].DefinitionLocation == nil {
continue
}
node, def := fd.Target[i].Ticket, fd.Target[i].DefinitionLocation.Ticket
fd.Target = append(fd.Target[:i], fd.Target[i+1:]...)
for _, d := range fd.Decoration {
if d.Target == node {
d.TargetDefinition = def
}
}
for _, o := range fd.TargetOverride {
if o.Overridden == node {
o.OverriddenDefinition = def
}
}
}
sort.Slice(fd.Decoration, func(i, j int) bool {
if c := compare.Ints(int(fd.Decoration[i].Anchor.StartOffset), int(fd.Decoration[j].Anchor.StartOffset)); c != compare.EQ {
return c == compare.LT
} else if c := compare.Ints(int(fd.Decoration[i].Anchor.EndOffset), int(fd.Decoration[j].Anchor.EndOffset)); c != compare.EQ {
return c == compare.LT
} else if c := compare.Strings(fd.Decoration[i].Kind, fd.Decoration[j].Kind); c != compare.EQ {
return c == compare.LT
}
return fd.Decoration[i].Target < fd.Decoration[j].Target
})
sort.Slice(fd.Target, func(i, j int) bool { return fd.Target[i].Ticket < fd.Target[j].Ticket })
return fd
}
func fileToDecorPiece(src *spb.VName, f *srvpb.File) (*spb.VName, *ppb.DecorationPiece) {
return src, &ppb.DecorationPiece{Piece: &ppb.DecorationPiece_File{f}}
}
func refToDecorPiece(r *ppb.Reference) (*spb.VName, *ppb.DecorationPiece, error) {
_, file, err := toEnclosingFile(r)
if err != nil {
return nil, nil, err
}
return file, &ppb.DecorationPiece{
Piece: &ppb.DecorationPiece_Reference{&ppb.Reference{
Source: r.Source,
Kind: r.Kind,
Anchor: r.Anchor,
}},
}, nil
}
func fileVName(anchor *spb.VName) *spb.VName {
return &spb.VName{
Corpus: anchor.Corpus,
Root: anchor.Root,
Path: anchor.Path,
}
}
func nodeToDecorPiece(key *spb.VName, node func(**ppb.Node) bool, file func(**spb.VName) bool, emit func(*spb.VName, *ppb.DecorationPiece)) {
var n, singleNode *ppb.Node
for node(&n) {
singleNode = n
}
if singleNode == nil {
return
}
piece := &ppb.DecorationPiece{
Piece: &ppb.DecorationPiece_Node{&ppb.Node{
Source: key,
Kind: singleNode.Kind,
Subkind: singleNode.Subkind,
Fact: singleNode.Fact,
Edge: singleNode.Edge,
}},
}
var f *spb.VName
for file(&f) {
emit(f, piece)
}
}
func defToDecorPiece(node *spb.VName, defs func(**srvpb.ExpandedAnchor) bool, file func(**spb.VName) bool, emit func(*spb.VName, *ppb.DecorationPiece)) {
var def *srvpb.ExpandedAnchor
for defs(&def) {
// TODO(schroederc): select ambiguous definition better
break // pick first known definition
}
if def == nil {
return
}
piece := &ppb.DecorationPiece{
Piece: &ppb.DecorationPiece_Definition_{&ppb.DecorationPiece_Definition{
Node: node,
Definition: def,
}},
}
var f *spb.VName
for file(&f) {
emit(f, piece)
}
}
// Nodes returns all *ppb.Nodes from the Kythe input graph.
func (k *KytheBeam) Nodes() beam.PCollection { return k.nodes }
// References returns all derived *ppb.References from the Kythe input graph.
func (k *KytheBeam) References() beam.PCollection {
if k.refs.IsValid() {
return k.refs
}
s := k.s.Scope("References")
anchors := beam.ParDo(s, keyByPath, beam.ParDo(s,
&nodes.Filter{
FilterByKind: []string{kinds.Anchor},
IncludeFacts: []string{
facts.AnchorStart, facts.AnchorEnd,
facts.SnippetStart, facts.SnippetEnd,
},
}, k.nodes))
k.refs = beam.ParDo(s, toRefs, beam.CoGroupByKey(s, k.getFiles(), anchors))
return k.refs
}
func (k *KytheBeam) getFiles() beam.PCollection {
if !k.files.IsValid() {
fileNodes := beam.ParDo(k.s,
&nodes.Filter{
FilterByKind: []string{kinds.File},
IncludeFacts: []string{facts.Text, facts.TextEncoding},
}, k.nodes)
k.files = beam.ParDo(k.s, toFiles, fileNodes)
}
return k.files
}
func keyByPath(n *ppb.Node) (*spb.VName, *ppb.Node) {
return &spb.VName{Corpus: n.Source.Corpus, Root: n.Source.Root, Path: n.Source.Path}, n
}
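// toRefs joins a file (keyed by path) with the anchor nodes that share that
// path and emits a *ppb.Reference per anchor edge; if no file is available
// for the path, the anchors are silently dropped.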
func toRefs(p *spb.VName, file func(**srvpb.File) bool, anchor func(**ppb.Node) bool, emit func(*ppb.Reference)) error {
var f *srvpb.File
if !file(&f) {
return nil
}
return normalizeAnchors(f, anchor, emit)
}
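// toFiles converts a file node into a *srvpb.File keyed by the node's VName,
// copying over its text and text-encoding facts.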
func toFiles(n *ppb.Node) (*spb.VName, *srvpb.File) {
var f srvpb.File
for _, fact := range n.Fact {
switch fact.GetKytheName() {
case scpb.FactName_TEXT:
f.Text = fact.Value
case scpb.FactName_TEXT_ENCODING:
f.Encoding = string(fact.Value)
}
}
return n.Source, &f
}
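// normalizeAnchors expands each raw anchor node against the file's text and
// emits one *ppb.Reference per non-childof edge, using the anchor's childof
// parent (if any) as the reference's scope.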
func normalizeAnchors(file *srvpb.File, anchor func(**ppb.Node) bool, emit func(*ppb.Reference)) error {
norm := xrefs.NewNormalizer(file.Text)
var n *ppb.Node
for anchor(&n) {
raw, err := toRawAnchor(n)
if err != nil {
return err
}
a, err := assemble.ExpandAnchor(raw, file, norm, "")
if err != nil {
return err
}
var parent *spb.VName
for _, e := range n.Edge {
if e.GetKytheKind() == scpb.EdgeKind_CHILD_OF {
parent = e.Target
break
}
}
for _, e := range n.Edge {
if e.GetKytheKind() == scpb.EdgeKind_CHILD_OF {
continue
}
ref := &ppb.Reference{
Source: e.Target,
Anchor: a,
Scope: parent,
}
if k := e.GetKytheKind(); k == scpb.EdgeKind_UNKNOWN_EDGE_KIND {
ref.Kind = &ppb.Reference_GenericKind{e.GetGenericKind()}
} else {
ref.Kind = &ppb.Reference_KytheKind{k}
}
emit(ref)
}
}
return nil
}
func toRawAnchor(n *ppb.Node) (*srvpb.RawAnchor, error) {
var a srvpb.RawAnchor
for _, f := range n.Fact {
i, err := strconv.Atoi(string(f.Value))
if err != nil {
return nil, fmt.Errorf("invalid integer fact value for %q: %v", f.GetKytheName(), err)
}
n := int32(i)
switch f.GetKytheName() {
case scpb.FactName_LOC_START:
a.StartOffset = n
case scpb.FactName_LOC_END:
a.EndOffset = n
case scpb.FactName_SNIPPET_START:
a.SnippetStart = n
case scpb.FactName_SNIPPET_END:
a.SnippetEnd = n
default:
return nil, fmt.Errorf("unhandled fact: %v", f)
}
}
a.Ticket = kytheuri.ToString(n.Source)
return &a, nil
}
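// moveSourceToKey re-keys each node by its source VName, dropping the source
// from the value so the remaining node data can be grouped per node.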
func moveSourceToKey(n *ppb.Node) (*spb.VName, *ppb.Node) {
return n.Source, &ppb.Node{
Kind: n.Kind,
Subkind: n.Subkind,
Fact: n.Fact,
Edge: n.Edge,
}
}
func (k *KytheBeam) directDefinitions() beam.PCollection {
s := k.s.Scope("DirectDefinitions")
return beam.ParDo(s, toDefinition, k.References())
}
func toDefinition(r *ppb.Reference, emit func(*spb.VName, *srvpb.ExpandedAnchor)) error {
if edges.IsVariant(refKind(r), edges.Defines) {
emit(r.Source, r.Anchor)
}
return nil
}
func refKind(r *ppb.Reference) string {
if k := r.GetKytheKind(); k != scpb.EdgeKind_UNKNOWN_EDGE_KIND {
return schema.EdgeKindString(k)
}
return r.GetGenericKind()
}
| 1 | 8,490 | Which methods need to be registered? Is it basically just the ones that are directly referenced in ParDo calls? (The Beam GoDoc doesn't say anything about it. :/ ) | kythe-kythe | go
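The question in the review comment above is about function registration in the Apache Beam Go SDK. Read against the file itself — a hedged interpretation, not an authoritative answer — the functions registered in init() are the top-level functions passed to beam.ParDo/ParDo2 (plus combiner and element types registered via beam.RegisterType); registration lets non-direct runners resolve those symbols when the serialized pipeline graph is rebuilt on workers, whereas purely local execution typically works without it. A minimal sketch of the same pattern, with illustrative names only:

package pipeline

import (
	"strings"

	"github.com/apache/beam/sdks/go/pkg/beam"
)

// toUpper is a plain function used as a DoFn. Because it is referenced by
// beam.ParDo below, it is registered in init() so that distributed runners
// can look it up by symbol when deserializing the pipeline.
func toUpper(s string) string { return strings.ToUpper(s) }

func init() {
	beam.RegisterFunction(toUpper)
}

// uppercase applies the registered DoFn to a PCollection of strings.
func uppercase(s beam.Scope, words beam.PCollection) beam.PCollection {
	return beam.ParDo(s, toUpper, words)
}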
@@ -143,7 +143,8 @@ public class Access extends AbstractApiBean {
downloadInstance.setFileCitationEndNote(datasetService.createCitationXML(datasetVersion, fileMetadata));
downloadInstance.setFileCitationRIS(datasetService.createCitationRIS(datasetVersion, fileMetadata));
-
+ downloadInstance.setFileCitationBibtex(datasetService.createCitationBibtex(datasetVersion, fileMetadata));
+
ByteArrayOutputStream outStream = null;
outStream = new ByteArrayOutputStream();
| 1 | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package edu.harvard.iq.dataverse.api;
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.FileMetadata;
import edu.harvard.iq.dataverse.DataFileServiceBean;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.DatasetVersion;
import edu.harvard.iq.dataverse.DatasetVersionServiceBean;
import edu.harvard.iq.dataverse.DatasetServiceBean;
import edu.harvard.iq.dataverse.Dataverse;
import edu.harvard.iq.dataverse.DataverseServiceBean;
import edu.harvard.iq.dataverse.DataverseSession;
import edu.harvard.iq.dataverse.DataverseTheme;
import edu.harvard.iq.dataverse.PermissionServiceBean;
import edu.harvard.iq.dataverse.authorization.Permission;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.authorization.users.GuestUser;
import edu.harvard.iq.dataverse.dataaccess.DataFileIO;
import edu.harvard.iq.dataverse.dataaccess.DataFileZipper;
import edu.harvard.iq.dataverse.dataaccess.FileAccessIO;
import edu.harvard.iq.dataverse.dataaccess.OptionalAccessService;
import edu.harvard.iq.dataverse.dataaccess.ImageThumbConverter;
import edu.harvard.iq.dataverse.datavariable.DataVariable;
import edu.harvard.iq.dataverse.datavariable.VariableServiceBean;
import edu.harvard.iq.dataverse.export.DDIExportServiceBean;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.SystemConfig;
import edu.harvard.iq.dataverse.worldmapauth.WorldMapTokenServiceBean;
import java.util.List;
import java.util.logging.Logger;
import javax.ejb.EJB;
import java.io.InputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Properties;
import javax.inject.Inject;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.UriInfo;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.NotFoundException;
import javax.ws.rs.QueryParam;
import javax.ws.rs.ServiceUnavailableException;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.StreamingOutput;
/*
Custom API exceptions [NOT YET IMPLEMENTED]
import edu.harvard.iq.dataverse.api.exceptions.NotFoundException;
import edu.harvard.iq.dataverse.api.exceptions.ServiceUnavailableException;
import edu.harvard.iq.dataverse.api.exceptions.PermissionDeniedException;
import edu.harvard.iq.dataverse.api.exceptions.AuthorizationRequiredException;
*/
/**
*
* @author Leonid Andreev
*
* The data (file) access API is based on the DVN access API v.1.0 (that came
* with the v.3.* of the DVN app) and extended for DVN 4.0 to include some
* extra fancy functionality, such as subsetting individual columns in tabular
* data files and more.
*/
@Path("access")
public class Access extends AbstractApiBean {
private static final Logger logger = Logger.getLogger(Access.class.getCanonicalName());
@EJB
DataFileServiceBean dataFileService;
@EJB
DatasetServiceBean datasetService;
@EJB
DatasetVersionServiceBean versionService;
@EJB
DataverseServiceBean dataverseService;
@EJB
VariableServiceBean variableService;
@EJB
SettingsServiceBean settingsService;
@EJB
SystemConfig systemConfig;
@EJB
DDIExportServiceBean ddiExportService;
@EJB
PermissionServiceBean permissionService;
@Inject
DataverseSession session;
@EJB
WorldMapTokenServiceBean worldMapTokenServiceBean;
private static final String API_KEY_HEADER = "X-Dataverse-key";
//@EJB
// TODO:
// versions? -- L.A. 4.0 beta 10
@Path("datafile/bundle/{fileId}")
@GET
@Produces({"application/zip"})
public BundleDownloadInstance datafileBundle(@PathParam("fileId") Long fileId, @QueryParam("key") String apiToken, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) /*throws NotFoundException, ServiceUnavailableException, PermissionDeniedException, AuthorizationRequiredException*/ {
DataFile df = dataFileService.find(fileId);
if (df == null) {
logger.warning("Access: datafile service could not locate a DataFile object for id "+fileId+"!");
throw new WebApplicationException(Response.Status.NOT_FOUND);
}
if (apiToken == null || apiToken.equals("")) {
apiToken = headers.getHeaderString(API_KEY_HEADER);
}
// This will throw a WebApplicationException, with the correct
// exit code, if access isn't authorized:
checkAuthorization(df, apiToken);
DownloadInfo dInfo = new DownloadInfo(df);
BundleDownloadInstance downloadInstance = new BundleDownloadInstance(dInfo);
FileMetadata fileMetadata = df.getFileMetadata();
DatasetVersion datasetVersion = df.getOwner().getLatestVersion();
downloadInstance.setFileCitationEndNote(datasetService.createCitationXML(datasetVersion, fileMetadata));
downloadInstance.setFileCitationRIS(datasetService.createCitationRIS(datasetVersion, fileMetadata));
ByteArrayOutputStream outStream = null;
outStream = new ByteArrayOutputStream();
try {
ddiExportService.exportDataFile(
fileId,
outStream,
null,
null);
downloadInstance.setFileDDIXML(outStream.toString());
} catch (Exception ex) {
// if we can't generate the DDI, it's ok;
// we'll just generate the bundle without it.
}
return downloadInstance;
}
@Path("datafile/{fileId}")
@GET
@Produces({ "application/xml" })
public DownloadInstance datafile(@PathParam("fileId") Long fileId, @QueryParam("key") String apiToken, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) /*throws NotFoundException, ServiceUnavailableException, PermissionDeniedException, AuthorizationRequiredException*/ {
DataFile df = dataFileService.find(fileId);
if (df == null) {
logger.warning("Access: datafile service could not locate a DataFile object for id "+fileId+"!");
throw new WebApplicationException(Response.Status.NOT_FOUND);
}
if (apiToken == null || apiToken.equals("")) {
apiToken = headers.getHeaderString(API_KEY_HEADER);
}
// This will throw a WebApplicationException, with the correct
// exit code, if access isn't authorized:
checkAuthorization(df, apiToken);
DownloadInfo dInfo = new DownloadInfo(df);
if (dataFileService.thumbnailSupported(df)) {
dInfo.addServiceAvailable(new OptionalAccessService("thumbnail", "image/png", "imageThumb=true", "Image Thumbnail (64x64)"));
}
if (df.isTabularData()) {
String originalMimeType = df.getDataTable().getOriginalFileFormat();
dInfo.addServiceAvailable(new OptionalAccessService("original", originalMimeType, "format=original","Saved original (" + originalMimeType + ")"));
dInfo.addServiceAvailable(new OptionalAccessService("R", "application/x-rlang-transport", "format=RData", "Data in R format"));
dInfo.addServiceAvailable(new OptionalAccessService("preprocessed", "application/json", "format=prep", "Preprocessed data in JSON"));
dInfo.addServiceAvailable(new OptionalAccessService("subset", "text/tab-separated-values", "variables=<LIST>", "Column-wise Subsetting"));
}
DownloadInstance downloadInstance = new DownloadInstance(dInfo);
for (String key : uriInfo.getQueryParameters().keySet()) {
String value = uriInfo.getQueryParameters().getFirst(key);
if (downloadInstance.isDownloadServiceSupported(key, value)) {
// this automatically sets the conversion parameters in
// the download instance to key and value;
// TODO: I should probably set these explicitly instead.
if (downloadInstance.getConversionParam().equals("subset")) {
String subsetParam = downloadInstance.getConversionParamValue();
String variableIdParams[] = subsetParam.split(",");
if (variableIdParams != null && variableIdParams.length > 0) {
logger.fine(variableIdParams.length + " tokens;");
for (int i = 0; i < variableIdParams.length; i++) {
logger.fine("token: " + variableIdParams[i]);
String token = variableIdParams[i].replaceFirst("^v", "");
Long variableId = null;
try {
variableId = new Long(token);
} catch (NumberFormatException nfe) {
variableId = null;
}
if (variableId != null) {
logger.fine("attempting to look up variable id " + variableId);
if (variableService != null) {
DataVariable variable = variableService.find(variableId);
if (variable != null) {
if (downloadInstance.getExtraArguments() == null) {
downloadInstance.setExtraArguments(new ArrayList<Object>());
}
logger.fine("putting variable id "+variable.getId()+" on the parameters list of the download instance.");
downloadInstance.getExtraArguments().add(variable);
//if (!variable.getDataTable().getDataFile().getId().equals(sf.getId())) {
//variableList.add(variable);
//}
}
} else {
logger.fine("variable service is null.");
}
}
}
}
}
break;
} else {
// Service unknown/not supported/bad arguments, etc.:
// TODO: throw new ServiceUnavailableException();
}
}
/*
* Provide content type header:
* (this will be done by the InstanceWriter class - ?)
*/
/* Provide "Access-Control-Allow-Origin" header:
* (may not be needed here... - that header was added specifically
* to get the data exploration app to be able to access the metadata
* API; may have been something specific to Vito's installation too
* -- L.A.)
*/
response.setHeader("Access-Control-Allow-Origin", "*");
/*
* Provide some browser-friendly headers: (?)
*/
//return retValue;
return downloadInstance;
}
/*
* Variants of the Access API calls for retrieving datafile-level
* Metadata.
*/
// Metadata format defaults to DDI:
@Path("datafile/{fileId}/metadata")
@GET
@Produces({"text/xml"})
public String tabularDatafileMetadata(@PathParam("fileId") Long fileId, @QueryParam("exclude") String exclude, @QueryParam("include") String include, @Context HttpHeaders header, @Context HttpServletResponse response) throws NotFoundException, ServiceUnavailableException /*, PermissionDeniedException, AuthorizationRequiredException*/ {
return tabularDatafileMetadataDDI(fileId, exclude, include, header, response);
}
/*
* This has been moved here, under /api/access, from the /api/meta hierarchy
* which we are going to retire.
*/
@Path("datafile/{fileId}/metadata/ddi")
@GET
@Produces({"text/xml"})
public String tabularDatafileMetadataDDI(@PathParam("fileId") Long fileId, @QueryParam("exclude") String exclude, @QueryParam("include") String include, @Context HttpHeaders header, @Context HttpServletResponse response) throws NotFoundException, ServiceUnavailableException /*, PermissionDeniedException, AuthorizationRequiredException*/ {
String retValue = "";
DataFile dataFile = null;
//httpHeaders.add("Content-disposition", "attachment; filename=\"dataverse_files.zip\"");
//httpHeaders.add("Content-Type", "application/zip; name=\"dataverse_files.zip\"");
response.setHeader("Content-disposition", "attachment; filename=\"dataverse_files.zip\"");
dataFile = dataFileService.find(fileId);
if (dataFile == null) {
throw new NotFoundException();
}
String fileName = dataFile.getFileMetadata().getLabel().replaceAll("\\.tab$", "-ddi.xml");
response.setHeader("Content-disposition", "attachment; filename=\""+fileName+"\"");
response.setHeader("Content-Type", "application/xml; name=\""+fileName+"\"");
ByteArrayOutputStream outStream = null;
outStream = new ByteArrayOutputStream();
try {
ddiExportService.exportDataFile(
fileId,
outStream,
exclude,
include);
retValue = outStream.toString();
} catch (Exception e) {
// For whatever reason we've failed to generate a partial
// metadata record requested.
// We return Service Unavailable.
throw new ServiceUnavailableException();
}
response.setHeader("Access-Control-Allow-Origin", "*");
return retValue;
}
@Path("variable/{varId}/metadata/ddi")
@GET
@Produces({ "application/xml" })
public String dataVariableMetadataDDI(@PathParam("varId") Long varId, @QueryParam("exclude") String exclude, @QueryParam("include") String include, @Context HttpHeaders header, @Context HttpServletResponse response) /*throws NotFoundException, ServiceUnavailableException, PermissionDeniedException, AuthorizationRequiredException*/ {
String retValue = "";
ByteArrayOutputStream outStream = null;
try {
outStream = new ByteArrayOutputStream();
ddiExportService.exportDataVariable(
varId,
outStream,
exclude,
include);
} catch (Exception e) {
// For whatever reason we've failed to generate a partial
// metadata record requested. We simply return an empty string.
return retValue;
}
retValue = outStream.toString();
response.setHeader("Access-Control-Allow-Origin", "*");
return retValue;
}
/*
* "Preprocessed data" metadata format:
* (this was previously provided as a "format conversion" option of the
* file download form of the access API call)
*/
@Path("datafile/{fileId}/metadata/preprocessed")
@GET
@Produces({"text/xml"})
public DownloadInstance tabularDatafileMetadataPreprocessed(@PathParam("fileId") Long fileId, @QueryParam("key") String apiToken, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) throws ServiceUnavailableException {
DataFile df = dataFileService.find(fileId);
if (df == null) {
logger.warning("Access: datafile service could not locate a DataFile object for id "+fileId+"!");
throw new WebApplicationException(Response.Status.NOT_FOUND);
}
if (apiToken == null || apiToken.equals("")) {
apiToken = headers.getHeaderString(API_KEY_HEADER);
}
// This will throw a WebApplicationException, with the correct
// exit code, if access isn't authorized:
checkAuthorization(df, apiToken);
DownloadInfo dInfo = new DownloadInfo(df);
if (df.isTabularData()) {
dInfo.addServiceAvailable(new OptionalAccessService("preprocessed", "application/json", "format=prep", "Preprocessed data in JSON"));
} else {
throw new ServiceUnavailableException("Preprocessed Content Metadata requested on a non-tabular data file.");
}
DownloadInstance downloadInstance = new DownloadInstance(dInfo);
if (downloadInstance.isDownloadServiceSupported("format", "prep")) {
logger.fine("Preprocessed data for tabular file "+fileId);
}
response.setHeader("Access-Control-Allow-Origin", "*");
return downloadInstance;
}
/*
* API method for downloading zipped bundles of multiple files:
*/
@Path("datafiles/{fileIds}")
@GET
@Produces({"application/zip"})
public /*ZippedDownloadInstance*/ Response datafiles(@PathParam("fileIds") String fileIds, @QueryParam("key") String apiTokenParam, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) throws WebApplicationException /*throws NotFoundException, ServiceUnavailableException, PermissionDeniedException, AuthorizationRequiredException*/ {
// create a Download Instance without, without a primary Download Info object:
//ZippedDownloadInstance downloadInstance = new ZippedDownloadInstance();
long setLimit = systemConfig.getZipDownloadLimit();
if (!(setLimit > 0L)) {
setLimit = DataFileZipper.DEFAULT_ZIPFILE_LIMIT;
}
long zipDownloadSizeLimit = setLimit;
logger.fine("setting zip download size limit to " + zipDownloadSizeLimit + " bytes.");
if (fileIds == null || fileIds.equals("")) {
throw new WebApplicationException(Response.Status.BAD_REQUEST);
}
String apiToken = (apiTokenParam == null || apiTokenParam.equals(""))
? headers.getHeaderString(API_KEY_HEADER)
: apiTokenParam;
StreamingOutput stream = new StreamingOutput() {
@Override
public void write(OutputStream os) throws IOException,
WebApplicationException {
String fileIdParams[] = fileIds.split(",");
DataFileZipper zipper = null;
boolean accessToUnrestrictedFileAuthorized = false;
String fileManifest = "";
long sizeTotal = 0L;
if (fileIdParams != null && fileIdParams.length > 0) {
logger.fine(fileIdParams.length + " tokens;");
for (int i = 0; i < fileIdParams.length; i++) {
logger.fine("token: " + fileIdParams[i]);
Long fileId = null;
try {
fileId = new Long(fileIdParams[i]);
} catch (NumberFormatException nfe) {
fileId = null;
}
if (fileId != null) {
logger.fine("attempting to look up file id " + fileId);
DataFile file = dataFileService.find(fileId);
if (file != null) {
if ((accessToUnrestrictedFileAuthorized && !file.isRestricted())
|| isAccessAuthorized(file, apiToken)) {
if (!file.isRestricted()) {
accessToUnrestrictedFileAuthorized = true;
}
logger.fine("adding datafile (id=" + file.getId() + ") to the download list of the ZippedDownloadInstance.");
//downloadInstance.addDataFile(file);
if (zipper == null) {
// This is the first file we can serve - so we now know that we are going to be able
// to produce some output.
zipper = new DataFileZipper(os);
zipper.setFileManifest(fileManifest);
response.setHeader("Content-disposition", "attachment; filename=\"dataverse_files.zip\"");
response.setHeader("Content-Type", "application/zip; name=\"dataverse_files.zip\"");
}
if (sizeTotal + file.getFilesize() < zipDownloadSizeLimit) {
sizeTotal += zipper.addFileToZipStream(file);
} else {
String fileName = file.getFileMetadata().getLabel();
String mimeType = file.getContentType();
zipper.addToManifest(fileName + " (" + mimeType + ") " + " skipped because the total size of the download bundle exceeded the limit of " + zipDownloadSizeLimit + " bytes.\r\n");
}
} else {
if (zipper == null) {
fileManifest = fileManifest + file.getFileMetadata().getLabel() + " IS RESTRICTED AND CANNOT BE DOWNLOADED\r\n";
} else {
zipper.addToManifest(file.getFileMetadata().getLabel() + " IS RESTRICTED AND CANNOT BE DOWNLOADED\r\n");
}
}
} else {
// Or should we just drop it and make a note in the Manifest?
throw new WebApplicationException(Response.Status.NOT_FOUND);
}
}
}
} else {
throw new WebApplicationException(Response.Status.BAD_REQUEST);
}
if (zipper == null) {
// If the DataFileZipper object is still NULL, it means that
// there were file ids supplied - but none of the corresponding
// files were accessible for this user.
// In which case we don't bother generating any output, and
// just give them a 403:
throw new WebApplicationException(Response.Status.FORBIDDEN);
}
// This will add the generated File Manifest to the zipped output,
// then flush and close the stream:
zipper.finalizeZipStream();
//os.flush();
//os.close();
}
};
return Response.ok(stream).build();
}
@Path("tempPreview/{fileSystemId}")
@GET
@Produces({"image/png"})
public InputStream tempPreview(@PathParam("fileSystemId") String fileSystemId, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) /*throws NotFoundException, ServiceUnavailableException, PermissionDeniedException, AuthorizationRequiredException*/ {
String filesRootDirectory = System.getProperty("dataverse.files.directory");
if (filesRootDirectory == null || filesRootDirectory.equals("")) {
filesRootDirectory = "/tmp/files";
}
String fileSystemName = filesRootDirectory + "/temp/" + fileSystemId;
String mimeTypeParam = uriInfo.getQueryParameters().getFirst("mimetype");
String imageThumbFileName = null;
if ("application/pdf".equals(mimeTypeParam)) {
imageThumbFileName = ImageThumbConverter.generatePDFThumb(fileSystemName);
} else {
imageThumbFileName = ImageThumbConverter.generateImageThumb(fileSystemName);
}
// TODO:
// double-check that this temporary preview thumbnail gets deleted
// once the file is saved "for real".
// (or maybe we shouldn't delete it - but instead move it into the
// permanent location... so that it doesn't have to be generated again?)
// -- L.A. Aug. 21 2014
// Update:
// the temporary thumbnail file does get cleaned up now;
// but yeay, maybe we should be saving it permanently instead, as
// the above suggested...
// -- L.A. Feb. 28 2015
if (imageThumbFileName == null) {
return null;
}
/*
removing the old, non-vector default icon:
imageThumbFileName = getWebappImageResource(DEFAULT_FILE_ICON);
}
*/
InputStream in;
try {
in = new FileInputStream(imageThumbFileName);
} catch (Exception ex) {
return null;
}
return in;
}
@Path("fileCardImage/{fileId}")
@GET
@Produces({ "image/png" })
public InputStream fileCardImage(@PathParam("fileId") Long fileId, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) /*throws NotFoundException, ServiceUnavailableException, PermissionDeniedException, AuthorizationRequiredException*/ {
DataFile df = dataFileService.find(fileId);
if (df == null) {
logger.warning("Preview: datafile service could not locate a DataFile object for id "+fileId+"!");
return null;
}
DataFileIO thumbnailDataAccess = null;
try {
DataFileIO dataAccess = df.getAccessObject();
if (dataAccess != null && dataAccess.isLocalFile()) {
dataAccess.open();
if ("application/pdf".equalsIgnoreCase(df.getContentType())
|| df.isImage()
|| "application/zipped-shapefile".equalsIgnoreCase(df.getContentType())) {
thumbnailDataAccess = ImageThumbConverter.getImageThumb((FileAccessIO) dataAccess, 48);
}
}
} catch (IOException ioEx) {
return null;
}
if (thumbnailDataAccess != null && thumbnailDataAccess.getInputStream() != null) {
return thumbnailDataAccess.getInputStream();
}
return null;
}
// Note:
// the Dataverse page is no longer using this method.
@Path("dsCardImage/{versionId}")
@GET
@Produces({ "image/png" })
public InputStream dsCardImage(@PathParam("versionId") Long versionId, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) /*throws NotFoundException, ServiceUnavailableException, PermissionDeniedException, AuthorizationRequiredException*/ {
DatasetVersion datasetVersion = versionService.find(versionId);
if (datasetVersion == null) {
logger.warning("Preview: Version service could not locate a DatasetVersion object for id "+versionId+"!");
return null;
}
//String imageThumbFileName = null;
DataFileIO thumbnailDataAccess = null;
// First, check if this dataset has a designated thumbnail image:
if (datasetVersion.getDataset() != null) {
DataFile logoDataFile = datasetVersion.getDataset().getThumbnailFile();
if (logoDataFile != null) {
try {
DataFileIO dataAccess = logoDataFile.getAccessObject();
if (dataAccess != null && dataAccess.isLocalFile()) {
dataAccess.open();
thumbnailDataAccess = ImageThumbConverter.getImageThumb((FileAccessIO) dataAccess, 48);
}
} catch (IOException ioEx) {
thumbnailDataAccess = null;
}
}
// If not, we'll try to use one of the files in this dataset version:
/*
if (thumbnailDataAccess == null) {
if (!datasetVersion.getDataset().isHarvested()) {
thumbnailDataAccess = getThumbnailForDatasetVersion(datasetVersion);
}
}
if (thumbnailDataAccess != null && thumbnailDataAccess.getInputStream() != null) {
return thumbnailDataAccess.getInputStream();
}
*/
}
return null;
}
@Path("dvCardImage/{dataverseId}")
@GET
@Produces({ "image/png" })
public InputStream dvCardImage(@PathParam("dataverseId") Long dataverseId, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) /*throws NotFoundException, ServiceUnavailableException, PermissionDeniedException, AuthorizationRequiredException*/ {
logger.info("entering dvCardImage");
Dataverse dataverse = dataverseService.find(dataverseId);
if (dataverse == null) {
logger.warning("Preview: Version service could not locate a DatasetVersion object for id "+dataverseId+"!");
return null;
}
String imageThumbFileName = null;
// First, check if the dataverse has a defined logo:
if (dataverse.getDataverseTheme()!=null && dataverse.getDataverseTheme().getLogo() != null && !dataverse.getDataverseTheme().getLogo().equals("")) {
File dataverseLogoFile = getLogo(dataverse);
if (dataverseLogoFile != null) {
logger.info("dvCardImage: logo file found");
String logoThumbNailPath = null;
InputStream in = null;
try {
if (dataverseLogoFile.exists()) {
logoThumbNailPath = ImageThumbConverter.generateImageThumb(dataverseLogoFile.getAbsolutePath(), 48);
if (logoThumbNailPath != null) {
in = new FileInputStream(logoThumbNailPath);
}
}
} catch (Exception ex) {
in = null;
}
if (in != null) {
logger.info("dvCardImage: successfully obtained thumbnail for dataverse logo.");
return in;
}
}
}
// If there's no uploaded logo for this dataverse, go through its
// [released] datasets and see if any of them have card images:
// TODO: figure out if we want to be doing this!
// (efficiency considerations...) -- L.A. 4.0
// And we definitely don't want to be doing this for harvested
// dataverses:
/*
DataFileIO thumbnailDataAccess = null;
if (!dataverse.isHarvested()) {
for (Dataset dataset : datasetService.findPublishedByOwnerId(dataverseId)) {
logger.info("dvCardImage: checking dataset "+dataset.getGlobalId());
if (dataset != null) {
DatasetVersion releasedVersion = dataset.getReleasedVersion();
logger.info("dvCardImage: obtained released version "+releasedVersion.getTitle());
thumbnailDataAccess = getThumbnailForDatasetVersion(releasedVersion);
if (thumbnailDataAccess != null) {
logger.info("dvCardImage: obtained thumbnail for the version.");
break;
}
}
}
}
if (thumbnailDataAccess != null && thumbnailDataAccess.getInputStream() != null) {
return thumbnailDataAccess.getInputStream();
}
*/
return null;
}
// helper methods:
// What the method below does - going through all the files in the version -
// is too expensive! Instead we are now selecting an available thumbnail and
// giving the dataset card a direct link to that file thumbnail. -- L.A., 4.2.2
/*
private DataFileIO getThumbnailForDatasetVersion(DatasetVersion datasetVersion) {
logger.info("entering getThumbnailForDatasetVersion()");
DataFileIO thumbnailDataAccess = null;
if (datasetVersion != null) {
List<FileMetadata> fileMetadatas = datasetVersion.getFileMetadatas();
for (FileMetadata fileMetadata : fileMetadatas) {
DataFile dataFile = fileMetadata.getDataFile();
logger.info("looking at file "+fileMetadata.getLabel()+" , file type "+dataFile.getContentType());
if (dataFile != null && dataFile.isImage()) {
try {
DataFileIO dataAccess = dataFile.getAccessObject();
if (dataAccess != null && dataAccess.isLocalFile()) {
dataAccess.open();
thumbnailDataAccess = ImageThumbConverter.getImageThumb((FileAccessIO) dataAccess, 48);
}
} catch (IOException ioEx) {
thumbnailDataAccess = null;
}
}
if (thumbnailDataAccess != null) {
logger.info("successfully generated thumbnail, returning.");
break;
}
}
}
return thumbnailDataAccess;
}
*/
// TODO:
// put this method into the dataverseservice; use it there
// -- L.A. 4.0 beta14
private File getLogo(Dataverse dataverse) {
if (dataverse.getId() == null) {
return null;
}
DataverseTheme theme = dataverse.getDataverseTheme();
if (theme != null && theme.getLogo() != null && !theme.getLogo().equals("")) {
Properties p = System.getProperties();
String domainRoot = p.getProperty("com.sun.aas.instanceRoot");
if (domainRoot != null && !"".equals(domainRoot)) {
return new File (domainRoot + File.separator +
"docroot" + File.separator +
"logos" + File.separator +
dataverse.getLogoOwnerId() + File.separator +
theme.getLogo());
}
}
return null;
}
/*
removing:
private String getWebappImageResource(String imageName) {
String imageFilePath = null;
String persistenceFilePath = null;
java.net.URL persistenceFileUrl = Thread.currentThread().getContextClassLoader().getResource("META-INF/persistence.xml");
if (persistenceFileUrl != null) {
persistenceFilePath = persistenceFileUrl.getDataFile();
if (persistenceFilePath != null) {
persistenceFilePath = persistenceFilePath.replaceFirst("/[^/]*$", "/");
imageFilePath = persistenceFilePath + "../../../resources/images/" + imageName;
return imageFilePath;
}
logger.warning("Null file path representation of the location of persistence.xml in the webapp root directory!");
} else {
logger.warning("Could not find the location of persistence.xml in the webapp root directory!");
}
return null;
}
*/
// checkAuthorization is a convenience method; it calls the boolean method
// isAccessAuthorized(), the actual workhorse, and throws a 403 exception if not.
private void checkAuthorization(DataFile df, String apiToken) throws WebApplicationException {
if (!isAccessAuthorized(df, apiToken)) {
throw new WebApplicationException(Response.Status.FORBIDDEN);
}
}
private boolean isAccessAuthorized(DataFile df, String apiToken) {
// First, check if the file belongs to a released Dataset version:
boolean published = false;
// TODO:
// this very likely creates a ton of queries; put some thought into
// optimizing this stuff? -- 4.2.1
//
// update: it appears that we can finally trust the dvObject.isReleased()
// method; so all this monstrous crawling through the filemetadatas,
// below, may not be necessary anymore! - need to verify... L.A. 10.21.2015
// update: NO! we still can't just trust .isReleased(), for these purposes!
// TODO: explain why. L.A. 10.29.2015
if (df.getOwner().getReleasedVersion() != null) {
//logger.fine("file belongs to a dataset with a released version.");
if (df.getOwner().getReleasedVersion().getFileMetadatas() != null) {
//logger.fine("going through the list of filemetadatas that belong to the released version.");
for (FileMetadata fm : df.getOwner().getReleasedVersion().getFileMetadatas()) {
if (df.equals(fm.getDataFile())) {
//logger.fine("found a match!");
published = true;
}
}
}
}
// TODO: (IMPORTANT!)
// Business logic like this should NOT be maintained in individual
// application fragments.
// At the moment it is duplicated here, and inside the Dataset page.
// There are also stubs for file-level permission lookups and caching
// inside Gustavo's view-scoped PermissionsWrapper.
// All this logic needs to be moved to the PermissionServiceBean where it will be
// centrally maintained; with the PermissionsWrapper providing
// efficient cached lookups to the pages (that often need to make
// repeated lookups on the same files). Care will need to be taken
// to preserve the slight differences in logic utilized by the page and
// this Access call (the page checks the restriction flag on the
// filemetadata, not the datafile - as it needs to reflect the permission
// status of the file in the version history).
// I will open a 4.[34] ticket.
//
// -- L.A. 4.2.1
// We don't need to check permissions on files that are
// from released Dataset versions and not restricted:
boolean restricted = false;
if (df.isRestricted()) {
restricted = true;
} else {
// There is also a special case of a restricted file that only exists
// in a draft version (i.e., a new file, that hasn't been published yet).
// Such files must be considered restricted, for access purposes. I.e.,
// users with no download access to this particular file, but with the
// permission to ViewUnpublished on the dataset, should NOT be allowed
// to download it.
// Up until 4.2.1 restricting unpublished files was only restricting them
// in their Draft version fileMetadata, but not in the DataFile object.
// (this is what we still want to do for published files; restricting those
// only restricts them in the new draft FileMetadata; until it becomes the
// published version, the restriction flag on the DataFile is what governs
// the download authorization).
//if (!published && df.getOwner().getVersions().size() == 1 && df.getOwner().getLatestVersion().isDraft()) {
// !df.isReleased() really means just this: new file, only exists in a Draft version!
if (!df.isReleased()) {
if (df.getFileMetadata().isRestricted()) {
restricted = true;
}
}
}
if (!restricted) {
// And if they are not published, they can still be downloaded, if the user
// has the permission to view unpublished versions! (this case will
// be handled below)
if (published) {
return true;
}
}
AuthenticatedUser user = null;
/**
* Authentication/authorization:
*
* note that the fragment below - that retrieves the session object
* and tries to find the user associated with the session - is really
* for logging/debugging purposes only; for practical purposes, it
* would be enough to just call "permissionService.on(df).has(Permission.DownloadFile)"
* and the method does just that, tries to authorize for the user in
* the current session (or guest user, if no session user is available):
*/
if (session != null) {
if (session.getUser() != null) {
if (session.getUser().isAuthenticated()) {
user = (AuthenticatedUser) session.getUser();
} else {
logger.fine("User associated with the session is not an authenticated user. (Guest access will be assumed).");
if (session.getUser() instanceof GuestUser) {
logger.fine("User associated with the session is indeed a guest user.");
}
}
} else {
logger.fine("No user associated with the session.");
}
} else {
logger.fine("Session is null.");
}
AuthenticatedUser apiTokenUser = null;
if ((apiToken != null)&&(apiToken.length()!=64)) {
// We'll also try to obtain the user information from the API token,
// if supplied:
apiTokenUser = findUserByApiToken(apiToken);
if (apiTokenUser == null) {
logger.warning("API token-based auth: Unable to find a user with the API token provided.");
}
}
// OK, let's revisit the case of non-restricted files, this time in
// an unpublished version:
// (if (published) was already addressed above)
if (!restricted) {
// If the file is not published, they can still download the file, if the user
// has the permission to view unpublished versions:
if (permissionService.userOn(user, df.getOwner()).has(Permission.ViewUnpublishedDataset)) {
if (user != null) {
// it's not unthinkable that a null user (i.e., guest user) could be given
// the ViewUnpublished permission!
logger.fine("Session-based auth: user "+user.getName()+" has access rights on the non-restricted, unpublished datafile.");
}
return true;
}
if (apiTokenUser != null) {
if (permissionService.userOn(apiTokenUser, df.getOwner()).has(Permission.ViewUnpublishedDataset)) {
logger.fine("Session-based auth: user "+apiTokenUser.getName()+" has access rights on the non-restricted, unpublished datafile.");
return true;
}
}
// We don't want to return false just yet.
// If all else fails, we'll want to use the special WorldMapAuth
// token authentication before we give up.
//return false;
} else {
// OK, this is a restricted file.
boolean hasAccessToRestrictedBySession = false;
boolean hasAccessToRestrictedByToken = false;
if (permissionService.on(df).has(Permission.DownloadFile)) {
// Note: PermissionServiceBean.on(Datafile df) will obtain the
// User from the Session object, just like in the code fragment
// above. That's why it's not passed along as an argument.
hasAccessToRestrictedBySession = true;
} else if (apiTokenUser != null && permissionService.userOn(apiTokenUser, df).has(Permission.DownloadFile)) {
hasAccessToRestrictedByToken = true;
}
if (hasAccessToRestrictedBySession || hasAccessToRestrictedByToken) {
if (published) {
if (hasAccessToRestrictedBySession) {
if (user != null) {
logger.fine("Session-based auth: user "+user.getName()+" is granted access to the restricted, published datafile.");
} else {
logger.fine("Session-based auth: guest user is granted access to the restricted, published datafile.");
}
} else {
logger.fine("Token-based auth: user "+apiTokenUser.getName()+" is granted access to the restricted, published datafile.");
}
return true;
} else {
// if the file is NOT published, we will let them download the
// file ONLY if they also have the permission to view
// unpublished versions:
// Note that the code below does not allow a case where it is the
// session user that has the permission on the file, and the API token
// user with the ViewUnpublished permission, or vice versa!
if (hasAccessToRestrictedBySession) {
if (permissionService.on(df.getOwner()).has(Permission.ViewUnpublishedDataset)) {
if (user != null) {
logger.fine("Session-based auth: user " + user.getName() + " is granted access to the restricted, unpublished datafile.");
} else {
logger.fine("Session-based auth: guest user is granted access to the restricted, unpublished datafile.");
}
return true;
}
} else {
if (apiTokenUser != null && permissionService.userOn(apiTokenUser, df.getOwner()).has(Permission.ViewUnpublishedDataset)) {
logger.fine("Token-based auth: user " + apiTokenUser.getName() + " is granted access to the restricted, unpublished datafile.");
}
}
}
}
}
// And if all that failed, we'll still check if the download can be authorized based
// on the special WorldMap token:
if ((apiToken != null)&&(apiToken.length()==64)){
/*
WorldMap token check
- WorldMap tokens are 64 chars in length
- Use the worldMapTokenServiceBean to verify token
and check permissions against the requested DataFile
*/
if (!(this.worldMapTokenServiceBean.isWorldMapTokenAuthorizedForDataFileDownload(apiToken, df))){
return false;
}
// Yes! User may access file
//
logger.fine("WorldMap token-based auth: Token is valid for the requested datafile");
return true;
} else if ((apiToken != null)&&(apiToken.length()!=64)) {
// Will try to obtain the user information from the API token,
// if supplied:
user = findUserByApiToken(apiToken);
if (user == null) {
logger.warning("API token-based auth: Unable to find a user with the API token provided.");
return false;
}
if (permissionService.userOn(user, df).has(Permission.DownloadFile)) {
if (published) {
logger.fine("API token-based auth: User "+user.getName()+" has rights to access the datafile.");
return true;
} else {
// if the file is NOT published, we will let them download the
// file ONLY if they also have the permission to view
// unpublished versions:
if (permissionService.userOn(user, df.getOwner()).has(Permission.ViewUnpublishedDataset)) {
logger.fine("API token-based auth: User "+user.getName()+" has rights to access the (unpublished) datafile.");
return true;
} else {
logger.fine("API token-based auth: User "+user.getName()+" is not authorized to access the (unpublished) datafile.");
}
}
} else {
logger.fine("API token-based auth: User "+user.getName()+" is not authorized to access the datafile.");
}
return false;
}
if (user != null) {
logger.fine("Session-based auth: user " + user.getName() + " has NO access rights on the requested datafile.");
}
if (apiTokenUser != null) {
logger.fine("Token-based auth: user " + apiTokenUser.getName() + " has NO access rights on the requested datafile.");
}
if (user == null && apiTokenUser == null) {
logger.fine("Unauthenticated access: No guest access to the datafile.");
}
return false;
}
} | 1 | 34,921 | @bmckinney does this mean that the bibtex citation will be available via API? | IQSS-dataverse | java |
@@ -28,11 +28,11 @@ if sys.version_info < (3,):
])
POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [
- 'cliquet[postgresql]>=2.14,<3'
+ 'cliquet[postgresql]>=2.15,<3'
]
MONITORING_REQUIREMENTS = REQUIREMENTS + [
- 'cliquet[monitoring]>=2.14,<3'
+ 'cliquet[monitoring]>=2.15,<3'
]
FXA_REQUIREMENTS = REQUIREMENTS + [ | 1 | import codecs
import os
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
def read_file(filename):
"""Open a related file and return its content."""
with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
content = f.read()
return content
README = read_file('README.rst')
CHANGELOG = read_file('CHANGELOG.rst')
CONTRIBUTORS = read_file('CONTRIBUTORS.rst')
REQUIREMENTS = [
'waitress',
'cliquet>=2.14,<3',
'jsonschema',
]
if sys.version_info < (3,):
REQUIREMENTS.extend([
'functools32', # not installed by jsonschema with old pip versions.
])
POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [
'cliquet[postgresql]>=2.14,<3'
]
MONITORING_REQUIREMENTS = REQUIREMENTS + [
'cliquet[monitoring]>=2.14,<3'
]
FXA_REQUIREMENTS = REQUIREMENTS + [
'cliquet-fxa'
]
ENTRY_POINTS = {
'paste.app_factory': [
'main = kinto:main',
],
'console_scripts': [
'kinto = kinto.__main__:main'
],
}
DEPENDENCY_LINKS = [
]
setup(name='kinto',
version='1.11.0.dev0',
description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
long_description=README + "\n\n" + CHANGELOG + "\n\n" + CONTRIBUTORS,
license='Apache License (2.0)',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"License :: OSI Approved :: Apache Software License"
],
keywords="web services",
author='Mozilla Services',
author_email='[email protected]',
url='https://github.com/Kinto/kinto',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=REQUIREMENTS,
extras_require={
'postgresql': POSTGRESQL_REQUIREMENTS,
'monitoring': MONITORING_REQUIREMENTS,
'fxa': FXA_REQUIREMENTS,
},
entry_points=ENTRY_POINTS,
dependency_links=DEPENDENCY_LINKS)
| 1 | 8,637 | We should probably tag the cliquet-fxa version as well. | Kinto-kinto | py |
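A minimal sketch of the change the reviewer is suggesting: pin cliquet-fxa with a version bound, mirroring how the other cliquet extras are pinned in this setup.py. The exact bound below is an assumption for illustration, not taken from the project:

# Hypothetical pin for the fxa extra; the actual bound should match the targeted cliquet-fxa release.
FXA_REQUIREMENTS = REQUIREMENTS + [
    'cliquet-fxa>=1.4,<2'  # assumed bound, shown only to illustrate the pattern
]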
@@ -317,7 +317,7 @@ static socklen_t parse_hostport(h2o_mem_pool_t *pool, h2o_iovec_t host, h2o_iove
4 &&
parsed_len == host.len && d1 <= UCHAR_MAX && d2 <= UCHAR_MAX && d3 <= UCHAR_MAX && d4 <= UCHAR_MAX) {
if (sscanf(port.base, "%" SCNd32 "%n", &_port, &parsed_len) == 1 && parsed_len == port.len && _port <= USHRT_MAX) {
- struct sockaddr_in sin;
+ struct sockaddr_in sin = {};
sin.sin_family = AF_INET;
sin.sin_port = htons(_port);
sin.sin_addr.s_addr = ntohl((d1 << 24) + (d2 << 16) + (d3 << 8) + d4); | 1 | /*
* Copyright (c) 2017 Ichito Nagata, Fastly, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <mruby.h>
#include <mruby/array.h>
#include <mruby/error.h>
#include <mruby/hash.h>
#include <mruby/string.h>
#include <mruby/variable.h>
#include "h2o/mruby_.h"
#include "embedded.c.h"
struct st_mruby_subreq_conn_t {
h2o_conn_t super;
struct {
h2o_iovec_t host;
h2o_iovec_t port;
struct sockaddr_storage addr;
socklen_t len;
} server;
struct {
h2o_iovec_t host;
h2o_iovec_t port;
struct sockaddr_storage addr;
socklen_t len;
} remote;
};
struct st_mruby_subreq_t {
h2o_req_t super;
struct st_mruby_subreq_conn_t conn;
h2o_mruby_context_t *ctx;
h2o_buffer_t *buf;
mrb_value receiver;
struct {
mrb_value request;
mrb_value input_stream;
} refs;
mrb_value error_stream;
struct {
h2o_mruby_generator_t *response;
h2o_mruby_generator_t *body;
} shortcut;
enum { INITIAL, RECEIVED, FINAL_RECEIVED } state;
unsigned char chain_proceed : 1;
};
struct st_h2o_mruby_middleware_sender_t {
h2o_mruby_sender_t super;
h2o_doublebuffer_t sending;
struct st_mruby_subreq_t *subreq;
struct {
h2o_iovec_t *bufs;
size_t bufcnt;
} blocking;
};
static void dispose_subreq(struct st_mruby_subreq_t *subreq)
{
/* subreq must be alive until generator gets disposed if shortcut is used */
assert(subreq->shortcut.response == NULL);
assert(subreq->shortcut.body == NULL);
if (!mrb_nil_p(subreq->error_stream)) {
mrb_gc_unregister(subreq->ctx->shared->mrb, subreq->error_stream);
subreq->error_stream = mrb_nil_value();
}
if (subreq->buf != NULL)
h2o_buffer_dispose(&subreq->buf);
if (!mrb_nil_p(subreq->refs.request))
DATA_PTR(subreq->refs.request) = NULL;
if (!mrb_nil_p(subreq->refs.input_stream))
DATA_PTR(subreq->refs.input_stream) = NULL;
h2o_dispose_request(&subreq->super);
free(subreq);
}
static void on_gc_dispose_app_request(mrb_state *mrb, void *_subreq)
{
struct st_mruby_subreq_t *subreq = _subreq;
if (subreq == NULL)
return;
subreq->refs.request = mrb_nil_value();
if (mrb_nil_p(subreq->refs.input_stream))
dispose_subreq(subreq);
}
static void on_gc_dispose_app_input_stream(mrb_state *mrb, void *_subreq)
{
struct st_mruby_subreq_t *subreq = _subreq;
if (subreq == NULL)
return;
subreq->refs.input_stream = mrb_nil_value();
if (mrb_nil_p(subreq->refs.request))
dispose_subreq(subreq);
}
const static struct mrb_data_type app_request_type = {"app_request_type", on_gc_dispose_app_request};
const static struct mrb_data_type app_input_stream_type = {"app_input_stream", on_gc_dispose_app_input_stream};
static h2o_iovec_t convert_env_to_header_name(h2o_mem_pool_t *pool, const char *name, size_t len)
{
#define KEY_PREFIX "HTTP_"
#define KEY_PREFIX_LEN (sizeof(KEY_PREFIX) - 1)
if (len < KEY_PREFIX_LEN || !h2o_memis(name, KEY_PREFIX_LEN, KEY_PREFIX, KEY_PREFIX_LEN)) {
return h2o_iovec_init(NULL, 0);
}
h2o_iovec_t ret;
ret.len = len - KEY_PREFIX_LEN;
ret.base = h2o_mem_alloc_pool(pool, char, ret.len);
name += KEY_PREFIX_LEN;
char *d = ret.base;
for (; len != 0; ++name, --len)
*d++ = *name == '_' ? '-' : h2o_tolower(*name);
return ret;
#undef KEY_PREFIX
#undef KEY_PREFIX_LEN
}
static int iterate_headers_callback(h2o_mruby_shared_context_t *shared_ctx, h2o_mem_pool_t *pool, h2o_iovec_t *name,
h2o_iovec_t value, void *cb_data)
{
mrb_value result_hash = mrb_obj_value(cb_data);
mrb_value n;
if (h2o_iovec_is_token(name)) {
const h2o_token_t *token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, name);
n = h2o_mruby_token_string(shared_ctx, token);
} else {
n = h2o_mruby_new_str(shared_ctx->mrb, name->base, name->len);
}
mrb_value v = h2o_mruby_new_str(shared_ctx->mrb, value.base, value.len);
mrb_hash_set(shared_ctx->mrb, result_hash, n, v);
return 0;
}
static mrb_value build_app_response(struct st_mruby_subreq_t *subreq)
{
h2o_req_t *req = &subreq->super;
h2o_mruby_context_t *ctx = subreq->ctx;
mrb_state *mrb = ctx->shared->mrb;
/* build response array */
mrb_value resp = mrb_ary_new_capa(mrb, 3);
/* status */
mrb_ary_set(mrb, resp, 0, mrb_fixnum_value(req->res.status));
/* headers */
{
mrb_value headers_hash = mrb_hash_new_capa(mrb, (int)req->res.headers.size);
h2o_mruby_iterate_native_headers(ctx->shared, &req->pool, &req->res.headers, iterate_headers_callback,
mrb_obj_ptr(headers_hash));
if (req->res.content_length != SIZE_MAX) {
h2o_token_t *token = H2O_TOKEN_CONTENT_LENGTH;
mrb_value n = h2o_mruby_new_str(mrb, token->buf.base, token->buf.len);
mrb_value v = h2o_mruby_to_str(mrb, mrb_fixnum_value(req->res.content_length));
mrb_hash_set(mrb, headers_hash, n, v);
}
mrb_ary_set(mrb, resp, 1, headers_hash);
}
/* body */
{
mrb_value body = h2o_mruby_create_data_instance(
mrb, mrb_ary_entry(ctx->shared->constants, H2O_MRUBY_APP_INPUT_STREAM_CLASS), subreq, &app_input_stream_type);
mrb_funcall(mrb, body, "initialize", 0);
mrb_ary_set(mrb, resp, 2, body);
}
return resp;
}
static void append_bufs(struct st_mruby_subreq_t *subreq, h2o_iovec_t *inbufs, size_t inbufcnt)
{
int i;
for (i = 0; i != inbufcnt; ++i) {
h2o_buffer_append(&subreq->buf, inbufs[i].base, inbufs[i].len);
}
}
static mrb_value detach_receiver(struct st_mruby_subreq_t *subreq)
{
mrb_value receiver = subreq->receiver;
assert(!mrb_nil_p(receiver));
subreq->receiver = mrb_nil_value();
mrb_gc_unregister(subreq->ctx->shared->mrb, receiver);
mrb_gc_protect(subreq->ctx->shared->mrb, receiver);
return receiver;
}
static void send_response_shortcutted(struct st_mruby_subreq_t *subreq);
static void subreq_ostream_send(h2o_ostream_t *_self, h2o_req_t *_subreq, h2o_iovec_t *inbufs, size_t inbufcnt,
h2o_send_state_t state)
{
struct st_mruby_subreq_t *subreq = (void *)_subreq;
mrb_state *mrb = subreq->ctx->shared->mrb;
/* body shortcut */
if (subreq->shortcut.body != NULL) {
if (subreq->shortcut.body->sender->final_sent)
return; /* TODO: close subreq ASAP */
subreq->chain_proceed = 1;
if (subreq->buf == NULL) {
/* flushing chunks has been finished, so send directly */
h2o_mruby_sender_do_send(subreq->shortcut.body, inbufs, inbufcnt, state);
} else {
/* flushing, buffer chunks again */
append_bufs(subreq, inbufs, inbufcnt);
}
return;
}
int is_first = subreq->state == INITIAL;
if (h2o_send_state_is_in_progress(state)) {
h2o_proceed_response_deferred(&subreq->super);
subreq->state = RECEIVED;
} else {
subreq->state = FINAL_RECEIVED;
}
append_bufs(subreq, inbufs, inbufcnt);
/* response shortcut */
if (subreq->shortcut.response != NULL) {
send_response_shortcutted(subreq);
return;
}
if (mrb_nil_p(subreq->receiver))
return;
int gc_arena = mrb_gc_arena_save(mrb);
if (is_first) {
/* the fiber is waiting due to calling req.join */
h2o_mruby_run_fiber(subreq->ctx, detach_receiver(subreq), mrb_nil_value(), NULL);
} else if (subreq->buf->size != 0) {
/* resume callback sender fiber */
mrb_value chunk = h2o_mruby_new_str(mrb, subreq->buf->bytes, subreq->buf->size);
h2o_buffer_consume(&subreq->buf, subreq->buf->size);
h2o_mruby_run_fiber(subreq->ctx, detach_receiver(subreq), chunk, NULL);
} else if (subreq->state == FINAL_RECEIVED) {
h2o_mruby_run_fiber(subreq->ctx, detach_receiver(subreq), mrb_nil_value(), NULL);
}
mrb_gc_arena_restore(mrb, gc_arena);
}
static void prepare_subreq_entity(h2o_req_t *subreq, h2o_mruby_context_t *ctx, mrb_value rack_input)
{
mrb_state *mrb = ctx->shared->mrb;
if (mrb_nil_p(rack_input)) {
subreq->entity = h2o_iovec_init(NULL, 0);
subreq->content_length = 0;
return;
}
// TODO: fastpath?
if (!mrb_respond_to(mrb, rack_input, mrb_intern_lit(mrb, "read"))) {
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "'rack.input' must respond to 'read'"));
return;
}
mrb_value body = mrb_funcall(mrb, rack_input, "read", 0);
if (mrb->exc != NULL)
return;
if (!mrb_string_p(body)) {
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "return value of `read` must be a string"));
return;
}
subreq->entity = h2o_strdup(&subreq->pool, RSTRING_PTR(body), RSTRING_LEN(body));
if (subreq->content_length == SIZE_MAX) {
subreq->content_length = subreq->entity.len;
} else {
if (subreq->content_length > subreq->entity.len)
subreq->content_length = subreq->entity.len;
else if (subreq->content_length < subreq->entity.len)
subreq->entity.len = subreq->content_length;
}
}
static socklen_t parse_hostport(h2o_mem_pool_t *pool, h2o_iovec_t host, h2o_iovec_t port, struct sockaddr_storage *ss)
{
/* fast path for IPv4 addresses */
{
unsigned int d1, d2, d3, d4, _port;
int parsed_len;
if (sscanf(host.base, "%" SCNd32 "%*[.]%" SCNd32 "%*[.]%" SCNd32 "%*[.]%" SCNd32 "%n", &d1, &d2, &d3, &d4, &parsed_len) ==
4 &&
parsed_len == host.len && d1 <= UCHAR_MAX && d2 <= UCHAR_MAX && d3 <= UCHAR_MAX && d4 <= UCHAR_MAX) {
if (sscanf(port.base, "%" SCNd32 "%n", &_port, &parsed_len) == 1 && parsed_len == port.len && _port <= USHRT_MAX) {
struct sockaddr_in sin;
sin.sin_family = AF_INET;
sin.sin_port = htons(_port);
sin.sin_addr.s_addr = ntohl((d1 << 24) + (d2 << 16) + (d3 << 8) + d4);
*ss = *((struct sockaddr_storage *)&sin);
return sizeof(sin);
}
}
}
/* call getaddrinfo */
struct addrinfo hints, *res = NULL;
char *hostname = h2o_mem_alloc_pool(pool, char, host.len + 1);
memcpy(hostname, host.base, host.len);
hostname[host.len] = '\0';
char *servname = h2o_mem_alloc_pool(pool, char, port.len + 1);
memcpy(servname, port.base, port.len);
servname[port.len] = '\0';
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
hints.ai_flags = AI_ADDRCONFIG | AI_NUMERICSERV;
if (getaddrinfo(hostname, servname, &hints, &res) != 0) {
goto Error;
}
switch (res->ai_family) {
case AF_INET:
case AF_INET6:
memcpy(ss, res->ai_addr, res->ai_addrlen);
break;
default:
goto Error;
}
socklen_t len = res->ai_addrlen;
freeaddrinfo(res);
return len;
Error:
if (res != NULL)
freeaddrinfo(res);
return 0;
}
static socklen_t get_sockname(h2o_conn_t *_conn, struct sockaddr *sa)
{
struct st_mruby_subreq_conn_t *conn = (void *)_conn;
if (conn->server.host.base != NULL) {
struct st_mruby_subreq_t *subreq = H2O_STRUCT_FROM_MEMBER(struct st_mruby_subreq_t, conn, conn);
conn->server.len = parse_hostport(&subreq->super.pool, conn->server.host, conn->server.port, &conn->server.addr);
conn->server.host.base = NULL;
}
memcpy(sa, &conn->server.addr, conn->server.len);
return conn->server.len;
}
static socklen_t get_peername(h2o_conn_t *_conn, struct sockaddr *sa)
{
struct st_mruby_subreq_conn_t *conn = (void *)_conn;
if (conn->remote.host.base != NULL) {
struct st_mruby_subreq_t *subreq = H2O_STRUCT_FROM_MEMBER(struct st_mruby_subreq_t, conn, conn);
conn->remote.len = parse_hostport(&subreq->super.pool, conn->remote.host, conn->remote.port, &conn->remote.addr);
conn->remote.host.base = NULL;
}
memcpy(sa, &conn->remote.addr, conn->remote.len);
return conn->remote.len;
}
static h2o_socket_t *get_socket(h2o_conn_t *conn)
{
return NULL;
}
static int handle_header_env_key(h2o_mruby_shared_context_t *shared_ctx, h2o_iovec_t *env_key, h2o_iovec_t value, void *_req)
{
h2o_req_t *req = _req;
const h2o_token_t *token;
/* convert env key to header name (lower case) */
h2o_iovec_t name = convert_env_to_header_name(&req->pool, env_key->base, env_key->len);
if (name.base == NULL)
return 0;
if ((token = h2o_lookup_token(name.base, name.len)) != NULL) {
if (token == H2O_TOKEN_CONTENT_LENGTH) {
/* skip. use CONTENT_LENGTH instead of HTTP_CONTENT_LENGTH */
} else {
value = h2o_strdup(&req->pool, value.base, value.len);
h2o_add_header(&req->pool, &req->headers, token, NULL, value.base, value.len);
}
} else {
value = h2o_strdup(&req->pool, value.base, value.len);
h2o_add_header_by_str(&req->pool, &req->headers, name.base, name.len, 0, NULL, value.base, value.len);
}
return 0;
}
static void on_subreq_error_callback(void *data, h2o_iovec_t prefix, h2o_iovec_t msg)
{
struct st_mruby_subreq_t *subreq = (void *)data;
mrb_state *mrb = subreq->ctx->shared->mrb;
assert(!mrb_nil_p(subreq->error_stream));
h2o_iovec_t concat = h2o_concat(&subreq->super.pool, prefix, msg);
mrb_value msgstr = h2o_mruby_new_str(mrb, concat.base, concat.len);
mrb_funcall(mrb, subreq->error_stream, "write", 1, msgstr);
if (mrb->exc != NULL) {
fprintf(stderr, "%s\n", RSTRING_PTR(mrb_inspect(mrb, mrb_obj_value(mrb->exc))));
mrb->exc = NULL;
}
}
/**
* relaxed parsing of HTTP version that defaults to 1.1
*/
static int parse_protocol_version(const char *s, size_t len)
{
int ver;
if (len < 6)
goto Default;
if (memcmp(s, "HTTP/", 5) != 0)
goto Default;
if (!('0' <= s[5] && s[5] <= '9'))
goto Default;
ver = (s[5] - '0') * 0x100;
if (len >= 8 && s[6] == '.' && ('0' <= s[7] && s[7] <= '9'))
ver += s[7] - '0';
return ver;
Default:
return 0x101;
}
static struct st_mruby_subreq_t *create_subreq(h2o_mruby_context_t *ctx, mrb_value env, int is_reprocess)
{
static const h2o_conn_callbacks_t callbacks = {get_sockname, /* stringify address */
get_peername, /* ditto */
NULL, /* push (no push in subrequest) */
get_socket, /* get underlying socket */
NULL, /* get debug state */
{{{NULL}}}};
mrb_state *mrb = ctx->shared->mrb;
int gc_arena = mrb_gc_arena_save(mrb);
mrb_gc_protect(mrb, env);
/* create subreq */
struct st_mruby_subreq_t *subreq = h2o_mem_alloc(sizeof(*subreq));
memset(&subreq->conn, 0, sizeof(subreq->conn));
subreq->ctx = ctx;
subreq->receiver = mrb_nil_value();
subreq->refs.request = mrb_nil_value();
subreq->refs.input_stream = mrb_nil_value();
h2o_buffer_init(&subreq->buf, &h2o_socket_buffer_prototype);
subreq->shortcut.response = NULL;
subreq->shortcut.body = NULL;
subreq->state = INITIAL;
subreq->chain_proceed = 0;
/* initialize super and conn */
subreq->conn.super.ctx = ctx->shared->ctx;
h2o_init_request(&subreq->super, &subreq->conn.super, NULL);
subreq->super.is_subrequest = 1;
h2o_ostream_t *ostream = h2o_add_ostream(&subreq->super, H2O_ALIGNOF(*ostream), sizeof(*ostream), &subreq->super._ostr_top);
ostream->do_send = subreq_ostream_send;
subreq->conn.super.hosts = ctx->handler->pathconf->global->hosts;
subreq->conn.super.connected_at = (struct timeval){0}; /* not needed because the subreq won't be logged */
subreq->conn.super.id = 0; /* currently conn->id is used only for logging, so set zero as a meaningless value */
subreq->conn.super.callbacks = &callbacks;
/* retrieve env variables */
mrb_value scheme = mrb_nil_value();
mrb_value method = mrb_nil_value();
mrb_value script_name = mrb_nil_value();
mrb_value path_info = mrb_nil_value();
mrb_value query_string = mrb_nil_value();
mrb_value rack_input = mrb_nil_value();
mrb_value http_host = mrb_nil_value();
mrb_value server_name = mrb_nil_value();
mrb_value server_port = mrb_nil_value();
mrb_value server_addr = mrb_nil_value();
mrb_value remote_addr = mrb_nil_value();
mrb_value remote_port = mrb_nil_value();
mrb_value server_protocol = mrb_nil_value();
mrb_value remaining_delegations = mrb_nil_value();
mrb_value remaining_reprocesses = mrb_nil_value();
mrb_value rack_errors = mrb_nil_value();
#define RETRIEVE_ENV(val, stringify, numify) \
do { \
val = value; \
if (!mrb_nil_p(val)) { \
if (stringify) \
val = h2o_mruby_to_str(mrb, val); \
if (numify) \
val = h2o_mruby_to_int(mrb, val); \
if (mrb->exc != NULL) \
goto Failed; \
} \
} while (0)
#define RETRIEVE_ENV_OBJ(val) RETRIEVE_ENV(val, 0, 0);
#define RETRIEVE_ENV_STR(val) RETRIEVE_ENV(val, 1, 0);
#define RETRIEVE_ENV_NUM(val) RETRIEVE_ENV(val, 0, 1);
#define COND0(str, lit, pos) (sizeof(lit) - 1 <= (pos) || (str)[pos] == (lit)[pos])
#define COND1(str, lit, pos) (COND0(str, lit, pos) && COND0(str, lit, pos + 1) && COND0(str, lit, pos + 2))
#define COND2(str, lit, pos) (COND1(str, lit, pos) && COND1(str, lit, pos + 3) && COND1(str, lit, pos + 6))
#define COND(str, lit) (COND2(str, lit, 0) && COND2(str, lit, 9) && COND2(str, lit, 18))
#define CHECK_KEY(lit) ((sizeof(lit) - 1) == keystr_len && COND(keystr, lit))
khiter_t k;
khash_t(ht) *h = mrb_hash_tbl(mrb, env);
for (k = kh_begin(h); k != kh_end(h); ++k) {
if (!kh_exist(h, k))
continue;
mrb_value key = h2o_mruby_to_str(mrb, kh_key(h, k));
if (mrb->exc != NULL)
goto Failed;
mrb_value value = kh_value(h, k).v;
const char *keystr = RSTRING_PTR(key);
const mrb_int keystr_len = RSTRING_LEN(key);
if (CHECK_KEY("CONTENT_LENGTH")) {
mrb_value content_length = mrb_nil_value();
RETRIEVE_ENV_NUM(content_length);
if (!mrb_nil_p(content_length))
subreq->super.content_length = mrb_fixnum(content_length);
} else if (CHECK_KEY("HTTP_HOST")) {
RETRIEVE_ENV_STR(http_host);
} else if (CHECK_KEY("PATH_INFO")) {
RETRIEVE_ENV_STR(path_info);
} else if (CHECK_KEY("QUERY_STRING")) {
RETRIEVE_ENV_STR(query_string);
} else if (CHECK_KEY("REMOTE_ADDR")) {
RETRIEVE_ENV_STR(remote_addr);
} else if (CHECK_KEY("REMOTE_PORT")) {
RETRIEVE_ENV_STR(remote_port);
} else if (CHECK_KEY("REQUEST_METHOD")) {
RETRIEVE_ENV_STR(method);
} else if (CHECK_KEY("SCRIPT_NAME")) {
RETRIEVE_ENV_STR(script_name);
} else if (CHECK_KEY("SERVER_ADDR")) {
RETRIEVE_ENV_STR(server_addr);
} else if (CHECK_KEY("SERVER_NAME")) {
RETRIEVE_ENV_STR(server_name);
} else if (CHECK_KEY("SERVER_PORT")) {
RETRIEVE_ENV_STR(server_port);
} else if (CHECK_KEY("SERVER_PROTOCOL")) {
RETRIEVE_ENV_STR(server_protocol);
} else if (CHECK_KEY("SERVER_SOFTWARE")) {
} else if (CHECK_KEY("h2o.remaining_delegations")) {
RETRIEVE_ENV_NUM(remaining_delegations);
} else if (CHECK_KEY("h2o.remaining_reprocesses")) {
RETRIEVE_ENV_NUM(remaining_reprocesses);
} else if (CHECK_KEY("rack.errors")) {
RETRIEVE_ENV_OBJ(rack_errors);
} else if (CHECK_KEY("rack.hijack?")) {
} else if (CHECK_KEY("rack.input")) {
RETRIEVE_ENV_OBJ(rack_input);
} else if (CHECK_KEY("rack.multiprocess")) {
} else if (CHECK_KEY("rack.multithread")) {
} else if (CHECK_KEY("rack.run_once")) {
} else if (CHECK_KEY("rack.url_scheme")) {
RETRIEVE_ENV_STR(scheme);
} else if (keystr_len >= 5 && memcmp(keystr, "HTTP_", 5) == 0) {
mrb_value http_header = mrb_nil_value();
RETRIEVE_ENV_STR(http_header);
if (!mrb_nil_p(http_header))
h2o_mruby_iterate_header_values(ctx->shared, key, http_header, handle_header_env_key, &subreq->super);
} else if (keystr_len != 0) {
/* set to req->env */
mrb_value reqenv = mrb_nil_value();
RETRIEVE_ENV_STR(reqenv);
if (!mrb_nil_p(reqenv)) {
h2o_vector_reserve(&subreq->super.pool, &subreq->super.env, subreq->super.env.size + 2);
subreq->super.env.entries[subreq->super.env.size] = h2o_strdup(&subreq->super.pool, keystr, keystr_len);
subreq->super.env.entries[subreq->super.env.size + 1] =
h2o_strdup(&subreq->super.pool, RSTRING_PTR(reqenv), RSTRING_LEN(reqenv));
subreq->super.env.size += 2;
}
}
}
#undef RETRIEVE_ENV
#undef RETRIEVE_ENV_OBJ
#undef RETRIEVE_ENV_STR
#undef RETRIEVE_ENV_NUM
#undef COND0
#undef COND1
#undef COND2
#undef COND
#undef CHECK_KEY
/* do validations */
#define CHECK_REQUIRED(k, v, non_empty) \
do { \
if (mrb_nil_p(v)) { \
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "missing required environment key: " k)); \
goto Failed; \
} else if (non_empty && RSTRING_LEN(v) == 0) { \
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, k " must be not empty")); \
goto Failed; \
} \
} while (0)
CHECK_REQUIRED("REQUEST_METHOD", method, 1);
CHECK_REQUIRED("rack.url_scheme", scheme, 1);
CHECK_REQUIRED("SCRIPT_NAME", script_name, 0);
CHECK_REQUIRED("PATH_INFO", path_info, 0);
CHECK_REQUIRED("QUERY_STRING", query_string, 0);
#undef CHECK_REQUIRED
if (RSTRING_LEN(script_name) != 0 && RSTRING_PTR(script_name)[0] != '/') {
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "SCRIPT_NAME must start with `/`"));
goto Failed;
}
if (RSTRING_LEN(path_info) != 0 && RSTRING_PTR(path_info)[0] != '/') {
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "PATH_INFO must start with `/`"));
goto Failed;
}
if (mrb_nil_p(http_host) && (mrb_nil_p(server_name) || mrb_nil_p(server_port))) {
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "HTTP_HOST or (SERVER_NAME and SERVER_PORT) is required"));
goto Failed;
}
if (!is_reprocess) {
/* ensure that SCRIPT_NAME is not modified */
h2o_iovec_t confpath = ctx->handler->pathconf->path;
size_t confpath_len_wo_slash = confpath.base[confpath.len - 1] == '/' ? confpath.len - 1 : confpath.len;
if (!(RSTRING_LEN(script_name) == confpath_len_wo_slash &&
memcmp(RSTRING_PTR(script_name), confpath.base, confpath_len_wo_slash) == 0)) {
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(
mrb, E_RUNTIME_ERROR, "can't modify `SCRIPT_NAME` with `H2O.next`. Is `H2O.reprocess` what you want?"));
goto Failed;
}
}
#define STR_TO_IOVEC(val) h2o_iovec_init(RSTRING_PTR(val), RSTRING_LEN(val))
/* construct url and parse */
h2o_iovec_t url_comps[9];
int num_comps = 0;
url_comps[num_comps++] = STR_TO_IOVEC(scheme);
url_comps[num_comps++] = h2o_iovec_init(H2O_STRLIT("://"));
if (!mrb_nil_p(http_host)) {
url_comps[num_comps++] = STR_TO_IOVEC(http_host);
} else {
url_comps[num_comps++] = STR_TO_IOVEC(server_name);
url_comps[num_comps++] = h2o_iovec_init(H2O_STRLIT(":"));
url_comps[num_comps++] = STR_TO_IOVEC(server_port);
}
url_comps[num_comps++] = STR_TO_IOVEC(script_name);
url_comps[num_comps++] = STR_TO_IOVEC(path_info);
if (RSTRING_LEN(query_string) != 0) {
url_comps[num_comps++] = h2o_iovec_init(H2O_STRLIT("?"));
url_comps[num_comps++] = STR_TO_IOVEC(query_string);
}
h2o_iovec_t url_str = h2o_concat_list(&subreq->super.pool, url_comps, num_comps);
h2o_url_t url_parsed;
if (h2o_url_parse(url_str.base, url_str.len, &url_parsed) != 0) {
/* TODO is there any other way to show a better error message? */
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "env variable contains invalid values"));
goto Failed;
}
/* setup req and conn using retrieved values */
subreq->super.input.scheme = url_parsed.scheme;
subreq->super.input.method = h2o_strdup(&subreq->super.pool, RSTRING_PTR(method), RSTRING_LEN(method));
subreq->super.input.authority = h2o_strdup(&subreq->super.pool, url_parsed.authority.base, url_parsed.authority.len);
subreq->super.input.path = h2o_strdup(&subreq->super.pool, url_parsed.path.base, url_parsed.path.len);
h2o_hostconf_t *hostconf = h2o_req_setup(&subreq->super);
subreq->super.hostconf = hostconf;
subreq->super.pathconf = ctx->handler->pathconf;
subreq->super.handler = &ctx->handler->super;
subreq->super.version = parse_protocol_version(RSTRING_PTR(server_protocol), RSTRING_LEN(server_protocol));
if (!mrb_nil_p(server_addr) && !mrb_nil_p(server_port)) {
subreq->conn.server.host = h2o_strdup(&subreq->super.pool, RSTRING_PTR(server_addr), RSTRING_LEN(server_addr));
subreq->conn.server.port = h2o_strdup(&subreq->super.pool, RSTRING_PTR(server_port), RSTRING_LEN(server_port));
}
if (!mrb_nil_p(remote_addr) && !mrb_nil_p(remote_port)) {
subreq->conn.remote.host = h2o_strdup(&subreq->super.pool, RSTRING_PTR(remote_addr), RSTRING_LEN(remote_addr));
subreq->conn.remote.port = h2o_strdup(&subreq->super.pool, RSTRING_PTR(remote_port), RSTRING_LEN(remote_port));
}
if (!mrb_nil_p(remaining_delegations)) {
mrb_int v = mrb_fixnum(remaining_delegations);
subreq->super.remaining_delegations = (unsigned)(v < 0 ? 0 : v);
}
if (!mrb_nil_p(remaining_reprocesses)) {
mrb_int v = mrb_fixnum(remaining_reprocesses);
subreq->super.remaining_reprocesses = (unsigned)(v < 0 ? 0 : v);
}
if (!mrb_nil_p(rack_errors)) {
subreq->error_stream = rack_errors;
mrb_gc_register(mrb, rack_errors);
subreq->super.error_log_delegate.cb = on_subreq_error_callback;
subreq->super.error_log_delegate.data = subreq;
}
prepare_subreq_entity(&subreq->super, ctx, rack_input);
if (mrb->exc != NULL)
goto Failed;
return subreq;
Failed:
assert(mrb->exc != NULL);
dispose_subreq(subreq);
mrb_gc_arena_restore(mrb, gc_arena);
return NULL;
#undef STR_TO_IOVEC
}
static mrb_value middleware_wait_response_callback(h2o_mruby_context_t *mctx, mrb_value input, mrb_value *receiver, mrb_value args,
int *run_again)
{
mrb_state *mrb = mctx->shared->mrb;
struct st_mruby_subreq_t *subreq;
if ((subreq = mrb_data_check_get_ptr(mrb, mrb_ary_entry(args, 0), &app_request_type)) == NULL) {
*run_again = 1;
return mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "AppRequest#join wrong self");
}
subreq->receiver = *receiver;
mrb_gc_register(mrb, *receiver);
return mrb_nil_value();
}
static mrb_value can_build_response_method(mrb_state *mrb, mrb_value self)
{
struct st_mruby_subreq_t *subreq = mrb_data_check_get_ptr(mrb, self, &app_request_type);
if (subreq == NULL)
mrb_raise(mrb, E_ARGUMENT_ERROR, "AppRequest#_can_build_response? wrong self");
return mrb_bool_value(subreq->state != INITIAL);
}
static mrb_value build_response_method(mrb_state *mrb, mrb_value self)
{
struct st_mruby_subreq_t *subreq = mrb_data_check_get_ptr(mrb, self, &app_request_type);
if (subreq == NULL)
mrb_raise(mrb, E_ARGUMENT_ERROR, "AppRequest#build_response wrong self");
mrb_value resp = build_app_response(subreq);
subreq->refs.input_stream = mrb_ary_entry(resp, 2);
return resp;
}
static mrb_value middleware_request_method(mrb_state *mrb, mrb_value self)
{
h2o_mruby_shared_context_t *shared_ctx = mrb->ud;
h2o_mruby_context_t *ctx = shared_ctx->current_context;
assert(ctx != NULL);
mrb_value env;
mrb_value reprocess;
mrb_get_args(mrb, "H", &env);
reprocess = mrb_iv_get(mrb, self, mrb_intern_lit(mrb, "@reprocess"));
/* create subreq */
struct st_mruby_subreq_t *subreq = create_subreq(shared_ctx->current_context, env, mrb_bool(reprocess));
if (mrb->exc != NULL) {
mrb_value exc = mrb_obj_value(mrb->exc);
mrb->exc = NULL;
mrb_exc_raise(mrb, exc);
}
subreq->refs.request = h2o_mruby_create_data_instance(mrb, mrb_ary_entry(ctx->shared->constants, H2O_MRUBY_APP_REQUEST_CLASS),
subreq, &app_request_type);
h2o_req_t *super = &subreq->super;
if (mrb_bool(reprocess)) {
h2o_reprocess_request_deferred(super, super->method, super->scheme, super->authority, super->path, super->overrides, 1);
} else {
h2o_delegate_request_deferred(super);
}
return subreq->refs.request;
}
static mrb_value middleware_wait_chunk_callback(h2o_mruby_context_t *mctx, mrb_value input, mrb_value *receiver, mrb_value args,
int *run_again)
{
mrb_state *mrb = mctx->shared->mrb;
struct st_mruby_subreq_t *subreq;
mrb_value obj = mrb_ary_entry(args, 0);
if (DATA_PTR(obj) == NULL) {
return mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "downstream HTTP closed");
} else if ((subreq = mrb_data_check_get_ptr(mrb, obj, &app_input_stream_type)) == NULL) {
return mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "AppInputStream#each wrong self");
}
if (subreq->buf->size != 0) {
*run_again = 1;
mrb_value chunk = h2o_mruby_new_str(mrb, subreq->buf->bytes, subreq->buf->size);
h2o_buffer_consume(&subreq->buf, subreq->buf->size);
return chunk;
} else if (subreq->state == FINAL_RECEIVED) {
*run_again = 1;
return mrb_nil_value();
} else {
assert(mrb_nil_p(subreq->receiver));
subreq->receiver = *receiver;
mrb_gc_register(mrb, *receiver);
return mrb_nil_value();
}
}
void h2o_mruby_middleware_init_context(h2o_mruby_shared_context_t *shared_ctx)
{
mrb_state *mrb = shared_ctx->mrb;
h2o_mruby_eval_expr(mrb, H2O_MRUBY_CODE_MIDDLEWARE);
h2o_mruby_assert(mrb);
struct RClass *module = mrb_define_module(mrb, "H2O");
struct RClass *app_klass = mrb_class_get_under(shared_ctx->mrb, module, "App");
mrb_define_method(mrb, app_klass, "request", middleware_request_method, MRB_ARGS_ARG(1, 0));
struct RClass *app_request_klass = mrb_class_get_under(shared_ctx->mrb, module, "AppRequest");
mrb_ary_set(shared_ctx->mrb, shared_ctx->constants, H2O_MRUBY_APP_REQUEST_CLASS, mrb_obj_value(app_request_klass));
h2o_mruby_define_callback(mrb, "_h2o_middleware_wait_response", middleware_wait_response_callback);
mrb_define_method(mrb, app_request_klass, "_can_build_response?", can_build_response_method, MRB_ARGS_NONE());
mrb_define_method(mrb, app_request_klass, "_build_response", build_response_method, MRB_ARGS_NONE());
struct RClass *app_input_stream_klass = mrb_class_get_under(shared_ctx->mrb, module, "AppInputStream");
mrb_ary_set(shared_ctx->mrb, shared_ctx->constants, H2O_MRUBY_APP_INPUT_STREAM_CLASS, mrb_obj_value(app_input_stream_klass));
h2o_mruby_define_callback(mrb, "_h2o_middleware_wait_chunk", middleware_wait_chunk_callback);
h2o_mruby_assert(mrb);
}
void do_sender_start(h2o_mruby_generator_t *generator)
{
struct st_h2o_mruby_middleware_sender_t *sender = (void *)generator->sender;
struct st_mruby_subreq_t *subreq = sender->subreq;
if (subreq->buf->size == 0 && subreq->state != FINAL_RECEIVED) {
h2o_doublebuffer_prepare_empty(&sender->sending);
h2o_send(generator->req, NULL, 0, H2O_SEND_STATE_IN_PROGRESS);
} else {
h2o_mruby_sender_do_send_buffer(generator, &sender->sending, &subreq->buf,
subreq->state == FINAL_RECEIVED ? H2O_SEND_STATE_FINAL : H2O_SEND_STATE_IN_PROGRESS);
}
}
void do_sender_proceed(h2o_generator_t *_generator, h2o_req_t *req)
{
h2o_mruby_generator_t *generator = (void *)_generator;
struct st_h2o_mruby_middleware_sender_t *sender = (void *)generator->sender;
struct st_mruby_subreq_t *subreq = sender->subreq;
if (generator->sender->final_sent)
return; /* TODO: close subreq ASAP */
if (subreq->buf != NULL) {
h2o_doublebuffer_consume(&sender->sending);
if (subreq->buf->size != 0) {
h2o_mruby_sender_do_send_buffer(generator, &sender->sending, &subreq->buf,
subreq->state == FINAL_RECEIVED ? H2O_SEND_STATE_FINAL : H2O_SEND_STATE_IN_PROGRESS);
return; /* don't proceed because it's already requested in subreq_ostream_send */
} else {
/* start direct shortcut */
h2o_buffer_dispose(&subreq->buf);
subreq->buf = NULL;
}
}
if (sender->subreq->chain_proceed)
h2o_proceed_response(&sender->subreq->super);
}
void do_sender_dispose(h2o_mruby_generator_t *generator)
{
struct st_h2o_mruby_middleware_sender_t *sender = (void *)generator->sender;
h2o_doublebuffer_dispose(&sender->sending);
if (sender->subreq->shortcut.response != NULL) {
assert(!mrb_nil_p(sender->subreq->refs.request));
mrb_gc_unregister(generator->ctx->shared->mrb, sender->subreq->refs.request);
sender->subreq->shortcut.response = NULL;
}
assert(sender->subreq->shortcut.body == generator);
sender->subreq->shortcut.body = NULL;
dispose_subreq(sender->subreq);
sender->subreq = NULL;
h2o_mruby_sender_close_body(generator);
}
static h2o_mruby_sender_t *create_sender(h2o_mruby_generator_t *generator, struct st_mruby_subreq_t *subreq, mrb_value body)
{
struct st_h2o_mruby_middleware_sender_t *sender =
(void *)h2o_mruby_sender_create(generator, body, H2O_ALIGNOF(*sender), sizeof(*sender));
sender->subreq = subreq;
h2o_doublebuffer_init(&sender->sending, &h2o_socket_buffer_prototype);
sender->super.start = do_sender_start;
sender->super.proceed = do_sender_proceed;
sender->super.dispose = do_sender_dispose;
subreq->shortcut.body = generator;
return &sender->super;
}
h2o_mruby_sender_t *h2o_mruby_middleware_sender_create(h2o_mruby_generator_t *generator, mrb_value body)
{
mrb_state *mrb = generator->ctx->shared->mrb;
struct st_mruby_subreq_t *subreq;
assert(mrb->exc == NULL);
if ((subreq = mrb_data_check_get_ptr(mrb, body, &app_input_stream_type)) == NULL)
return NULL;
return create_sender(generator, subreq, body);
}
static void send_response_shortcutted(struct st_mruby_subreq_t *subreq)
{
h2o_mruby_generator_t *generator = subreq->shortcut.response;
assert(generator != NULL);
/* copy response except for headers and original */
generator->req->res.status = subreq->super.res.status;
generator->req->res.reason = subreq->super.res.reason;
generator->req->res.content_length = subreq->super.res.content_length;
generator->req->res.mime_attr = subreq->super.res.mime_attr;
/* handle response headers */
int i;
for (i = 0; i != subreq->super.res.headers.size; ++i) {
h2o_header_t *header = subreq->super.res.headers.entries + i;
h2o_mruby_set_response_header(generator->ctx->shared, header->name, header->value, generator->req);
}
/* add date: if it's missing from the response */
if (h2o_find_header(&generator->req->res.headers, H2O_TOKEN_DATE, SIZE_MAX) == -1)
h2o_resp_add_date_header(generator->req);
/* setup body sender */
h2o_mruby_sender_t *sender = create_sender(generator, subreq, mrb_nil_value());
generator->sender = sender;
generator->super.proceed = sender->proceed;
/* start sending response */
h2o_start_response(generator->req, &generator->super);
generator->sender->start(generator);
}
static int send_response_callback(h2o_mruby_generator_t *generator, mrb_int status, mrb_value resp, int *is_delegate)
{
struct st_mruby_subreq_t *subreq = mrb_data_check_get_ptr(generator->ctx->shared->mrb, resp, &app_request_type);
assert(subreq != NULL);
assert(mrb_obj_ptr(subreq->refs.request) == mrb_obj_ptr(resp));
subreq->shortcut.response = generator;
mrb_gc_register(generator->ctx->shared->mrb, resp); /* prevent request and subreq from being disposed */
if (subreq->state != INITIAL) {
/* immediately start sending response, otherwise defer it until once receive data from upstream (subreq_ostream_send) */
send_response_shortcutted(subreq);
}
return 0;
}
h2o_mruby_send_response_callback_t h2o_mruby_middleware_get_send_response_callback(h2o_mruby_context_t *ctx, mrb_value resp)
{
mrb_state *mrb = ctx->shared->mrb;
struct st_mruby_subreq_t *subreq;
if ((subreq = mrb_data_check_get_ptr(mrb, resp, &app_request_type)) == NULL)
return NULL;
return send_response_callback;
}
| 1 | 13,163 | IIRC we need to use memset, because an empty brace is not C99 conformant, and because we cannot use `{0}` because how the struct is organized is not defined in POSIX (the initializer cannot be `{0}` if the first property of the struct is a struct). | h2o-h2o | c |
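A small illustration of the reviewer's suggestion: zero the sockaddr_in with memset() rather than an empty-brace initializer. make_ipv4_sockaddr() is a hypothetical helper written only to show the pattern; it is not part of the h2o code or of the patch under review:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static struct sockaddr_in make_ipv4_sockaddr(uint16_t port, uint32_t addr_host_order)
{
    struct sockaddr_in sin;
    memset(&sin, 0, sizeof(sin)); /* portable C99 zero-initialization, as the reviewer suggests */
    sin.sin_family = AF_INET;
    sin.sin_port = htons(port);
    sin.sin_addr.s_addr = htonl(addr_host_order);
    return sin;
}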
@@ -434,6 +434,11 @@ type NetworkPolicyPeer struct {
// Exact FQDNs, i.e. "google.com", "db-svc.default.svc.cluster.local"
// Wildcard expressions, i.e. "*wayfair.com".
FQDN string `json:"fqdn,omitempty"`
+ // Select all Pods with the ServiceAccount matched by this field, as
+ // workloads in AppliedTo/To/From fields.
+ // Cannot be set with any other selector.
+ // +optional
+ ServiceAccounts []ServiceAccount `json:"serviceAccounts,omitempty"`
}
type PeerNamespaces struct { | 1 | // Copyright 2021 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
type TraceflowPhase string
const (
// Pending is not used anymore
Pending TraceflowPhase = "Pending"
Running TraceflowPhase = "Running"
Succeeded TraceflowPhase = "Succeeded"
Failed TraceflowPhase = "Failed"
)
type TraceflowComponent string
const (
ComponentSpoofGuard TraceflowComponent = "SpoofGuard"
ComponentLB TraceflowComponent = "LB"
ComponentRouting TraceflowComponent = "Routing"
ComponentNetworkPolicy TraceflowComponent = "NetworkPolicy"
ComponentForwarding TraceflowComponent = "Forwarding"
)
type TraceflowAction string
const (
ActionDelivered TraceflowAction = "Delivered"
ActionReceived TraceflowAction = "Received"
ActionForwarded TraceflowAction = "Forwarded"
ActionDropped TraceflowAction = "Dropped"
ActionRejected TraceflowAction = "Rejected"
// ActionForwardedOutOfOverlay indicates that the packet has been forwarded out of the network
// managed by Antrea. This indicates that the Traceflow request can be considered complete.
ActionForwardedOutOfOverlay TraceflowAction = "ForwardedOutOfOverlay"
)
// List the supported protocols and their codes in traceflow.
// According to the code in the Antrea agent and controller, the default protocol is ICMP if no protocol is specified by the user.
const (
ICMPProtocol int32 = 1
TCPProtocol int32 = 6
UDPProtocol int32 = 17
SCTPProtocol int32 = 132
)
var SupportedProtocols = map[string]int32{
"TCP": TCPProtocol,
"UDP": UDPProtocol,
"ICMP": ICMPProtocol,
}
var ProtocolsToString = map[int32]string{
TCPProtocol: "TCP",
UDPProtocol: "UDP",
ICMPProtocol: "ICMP",
SCTPProtocol: "SCTP",
}
// List the supported destination types in traceflow.
const (
DstTypePod = "Pod"
DstTypeService = "Service"
DstTypeIPv4 = "IPv4"
)
var SupportedDestinationTypes = []string{
DstTypePod,
DstTypeService,
DstTypeIPv4,
}
// List the ethernet types.
const (
EtherTypeIPv4 uint16 = 0x0800
EtherTypeIPv6 uint16 = 0x86DD
)
// Default timeout in seconds.
const DefaultTraceflowTimeout uint16 = 20
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Traceflow struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec TraceflowSpec `json:"spec,omitempty"`
Status TraceflowStatus `json:"status,omitempty"`
}
// TraceflowSpec describes the spec of the traceflow.
type TraceflowSpec struct {
Source Source `json:"source,omitempty"`
Destination Destination `json:"destination,omitempty"`
Packet Packet `json:"packet,omitempty"`
// LiveTraffic indicates the Traceflow is to trace the live traffic
// rather than an injected packet, when set to true. The first packet of
// the first connection that matches the packet spec will be traced.
LiveTraffic bool `json:"liveTraffic,omitempty"`
// DroppedOnly indicates only the dropped packet should be captured in a
// live-traffic Traceflow.
DroppedOnly bool `json:"droppedOnly,omitempty"`
// Timeout specifies the timeout of the Traceflow in seconds. Defaults
// to 20 seconds if not set.
Timeout uint16 `json:"timeout,omitempty"`
}
// Source describes the source spec of the traceflow.
type Source struct {
// Namespace is the source namespace.
Namespace string `json:"namespace,omitempty"`
// Pod is the source pod.
Pod string `json:"pod,omitempty"`
// IP is the source IPv4 or IPv6 address. IP as the source is supported
// only for live-traffic Traceflow.
IP string `json:"ip,omitempty"`
}
// Destination describes the destination spec of the traceflow.
type Destination struct {
// Namespace is the destination namespace.
Namespace string `json:"namespace,omitempty"`
// Pod is the destination pod, exclusive with destination service.
Pod string `json:"pod,omitempty"`
// Service is the destination service, exclusive with destination pod.
Service string `json:"service,omitempty"`
// IP is the destination IPv4 or IPv6 address.
IP string `json:"ip,omitempty"`
}
// IPHeader describes spec of an IPv4 header.
type IPHeader struct {
// SrcIP is the source IP.
SrcIP string `json:"srcIP,omitempty" yaml:"srcIP,omitempty"`
// Protocol is the IP protocol.
Protocol int32 `json:"protocol,omitempty" yaml:"protocol,omitempty"`
// TTL is the IP TTL.
TTL int32 `json:"ttl,omitempty" yaml:"ttl,omitempty"`
// Flags is the flags for IP.
Flags int32 `json:"flags,omitempty" yaml:"flags,omitempty"`
}
// IPv6Header describes spec of an IPv6 header.
type IPv6Header struct {
// SrcIP is the source IPv6.
SrcIP string `json:"srcIP,omitempty" yaml:"srcIP,omitempty"`
// NextHeader is the IPv6 protocol.
NextHeader *int32 `json:"nextHeader,omitempty" yaml:"nextHeader,omitempty"`
// HopLimit is the IPv6 Hop Limit.
HopLimit int32 `json:"hopLimit,omitempty" yaml:"hopLimit,omitempty"`
}
// TransportHeader describes spec of a TransportHeader.
type TransportHeader struct {
ICMP *ICMPEchoRequestHeader `json:"icmp,omitempty" yaml:"icmp,omitempty"`
UDP *UDPHeader `json:"udp,omitempty" yaml:"udp,omitempty"`
TCP *TCPHeader `json:"tcp,omitempty" yaml:"tcp,omitempty"`
}
// ICMPEchoRequestHeader describes spec of an ICMP echo request header.
type ICMPEchoRequestHeader struct {
// ID is the ICMPEchoRequestHeader ID.
ID int32 `json:"id,omitempty"`
// Sequence is the ICMPEchoRequestHeader sequence.
Sequence int32 `json:"sequence,omitempty"`
}
// UDPHeader describes spec of a UDP header.
type UDPHeader struct {
// SrcPort is the source port.
SrcPort int32 `json:"srcPort,omitempty"`
// DstPort is the destination port.
DstPort int32 `json:"dstPort,omitempty"`
}
// TCPHeader describes spec of a TCP header.
type TCPHeader struct {
// SrcPort is the source port.
SrcPort int32 `json:"srcPort,omitempty"`
// DstPort is the destination port.
DstPort int32 `json:"dstPort,omitempty"`
// Flags are flags in the header.
Flags int32 `json:"flags,omitempty"`
}
// Packet includes header info.
type Packet struct {
SrcIP string `json:"srcIP,omitempty"`
DstIP string `json:"dstIP,omitempty"`
// Length is the IP packet length (includes the IPv4 or IPv6 header length).
Length uint16 `json:"length,omitempty"`
// TODO: change type IPHeader to *IPHeader and correct all internal references
IPHeader IPHeader `json:"ipHeader,omitempty"`
IPv6Header *IPv6Header `json:"ipv6Header,omitempty"`
TransportHeader TransportHeader `json:"transportHeader,omitempty"`
}
// TraceflowStatus describes current status of the traceflow.
type TraceflowStatus struct {
// Phase is the Traceflow phase.
Phase TraceflowPhase `json:"phase,omitempty"`
// Reason is a message indicating the reason of the traceflow's current phase.
Reason string `json:"reason,omitempty"`
// StartTime is the time at which the Traceflow was started by the Antrea Controller.
// Before K8s v1.20, null values (field not set) are not pruned, and a CR where a
// metav1.Time field is not set would fail OpenAPI validation (type string). The
// recommendation seems to be to use a pointer instead, and the field will be omitted when
// serializing.
// See https://github.com/kubernetes/kubernetes/issues/86811
StartTime *metav1.Time `json:"startTime,omitempty"`
// DataplaneTag is a tag to identify a traceflow session across Nodes.
DataplaneTag uint8 `json:"dataplaneTag,omitempty"`
// Results is the collection of all observations on different nodes.
Results []NodeResult `json:"results,omitempty"`
// CapturedPacket is the captured packet in live-traffic Traceflow.
CapturedPacket *Packet `json:"capturedPacket,omitempty"`
}
type NodeResult struct {
// Node is the node of the observation.
Node string `json:"node,omitempty" yaml:"node,omitempty"`
// Role of the node like sender, receiver, etc.
Role string `json:"role,omitempty" yaml:"role,omitempty"`
// Timestamp is the timestamp of the observations on the node.
Timestamp int64 `json:"timestamp,omitempty" yaml:"timestamp,omitempty"`
// Observations includes all observations from sender nodes, receiver ones, etc.
Observations []Observation `json:"observations,omitempty" yaml:"observations,omitempty"`
}
// Observation describes those from sender nodes or receiver nodes.
type Observation struct {
// Component is the observation component.
Component TraceflowComponent `json:"component,omitempty" yaml:"component,omitempty"`
// ComponentInfo is the extension of Component field.
ComponentInfo string `json:"componentInfo,omitempty" yaml:"componentInfo,omitempty"`
// Action is the action to the observation.
Action TraceflowAction `json:"action,omitempty" yaml:"action,omitempty"`
// Pod is the combination of Pod name and Pod Namespace.
Pod string `json:"pod,omitempty" yaml:"pod,omitempty"`
// DstMAC is the destination MAC.
DstMAC string `json:"dstMAC,omitempty" yaml:"dstMAC,omitempty"`
// NetworkPolicy is the combination of Namespace and NetworkPolicyName.
NetworkPolicy string `json:"networkPolicy,omitempty" yaml:"networkPolicy,omitempty"`
// TTL is the observation TTL.
TTL int32 `json:"ttl,omitempty" yaml:"ttl,omitempty"`
// TranslatedSrcIP is the translated source IP.
TranslatedSrcIP string `json:"translatedSrcIP,omitempty" yaml:"translatedSrcIP,omitempty"`
// TranslatedDstIP is the translated destination IP.
TranslatedDstIP string `json:"translatedDstIP,omitempty" yaml:"translatedDstIP,omitempty"`
// TunnelDstIP is the tunnel destination IP.
TunnelDstIP string `json:"tunnelDstIP,omitempty" yaml:"tunnelDstIP,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TraceflowList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Traceflow `json:"items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type NetworkPolicy struct {
metav1.TypeMeta `json:",inline"`
// Standard metadata of the object.
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of NetworkPolicy.
Spec NetworkPolicySpec `json:"spec"`
// Most recently observed status of the NetworkPolicy.
Status NetworkPolicyStatus `json:"status"`
}
// NetworkPolicySpec defines the desired state for NetworkPolicy.
type NetworkPolicySpec struct {
// Tier specifies the tier to which this NetworkPolicy belongs to.
// The NetworkPolicy order will be determined based on the combination of the
// Tier's Priority and the NetworkPolicy's own Priority. If not specified,
// this policy will be created in the Application Tier right above the K8s
// NetworkPolicy which resides at the bottom.
Tier string `json:"tier,omitempty"`
// Priority specifies the order of the NetworkPolicy relative to other
// NetworkPolicies.
Priority float64 `json:"priority"`
// Select workloads to which the rules will be applied. Cannot be set in
// conjunction with AppliedTo in each rule.
// +optional
AppliedTo []NetworkPolicyPeer `json:"appliedTo,omitempty"`
// Set of ingress rules evaluated based on the order in which they are set.
// Currently Ingress rule supports setting the `From` field but not the `To`
// field within a Rule.
// +optional
Ingress []Rule `json:"ingress"`
// Set of egress rules evaluated based on the order in which they are set.
// Currently Egress rule supports setting the `To` field but not the `From`
// field within a Rule.
// +optional
Egress []Rule `json:"egress"`
}
// NetworkPolicyPhase defines the phase in which a NetworkPolicy is.
type NetworkPolicyPhase string
// These are the valid values for NetworkPolicyPhase.
const (
// NetworkPolicyPending means the NetworkPolicy has been accepted by the system, but it has not been processed by Antrea.
NetworkPolicyPending NetworkPolicyPhase = "Pending"
// NetworkPolicyRealizing means the NetworkPolicy has been observed by Antrea and is being realized.
NetworkPolicyRealizing NetworkPolicyPhase = "Realizing"
// NetworkPolicyRealized means the NetworkPolicy has been enforced to all Pods on all Nodes it applies to.
NetworkPolicyRealized NetworkPolicyPhase = "Realized"
)
// NetworkPolicyStatus represents information about the status of a NetworkPolicy.
type NetworkPolicyStatus struct {
// The phase of a NetworkPolicy is a simple, high-level summary of the NetworkPolicy's status.
Phase NetworkPolicyPhase `json:"phase"`
// The generation observed by Antrea.
ObservedGeneration int64 `json:"observedGeneration"`
// The number of nodes that have realized the NetworkPolicy.
CurrentNodesRealized int32 `json:"currentNodesRealized"`
// The total number of nodes that should realize the NetworkPolicy.
DesiredNodesRealized int32 `json:"desiredNodesRealized"`
}
// Rule describes the traffic allowed to/from the workloads selected by
// Spec.AppliedTo. Based on the action specified in the rule, traffic is either
// allowed or denied which exactly match the specified ports and protocol.
type Rule struct {
// Action specifies the action to be applied on the rule.
Action *RuleAction `json:"action"`
// Set of port and protocol allowed/denied by the rule. If this field is unset
// or empty, this rule matches all ports.
// +optional
Ports []NetworkPolicyPort `json:"ports,omitempty"`
// Rule is matched if traffic originates from workloads selected by
// this field. If this field is empty, this rule matches all sources.
// +optional
From []NetworkPolicyPeer `json:"from"`
// Rule is matched if traffic is intended for workloads selected by
// this field. This field can't be used with ToServices. If this field
// and ToServices are both empty or missing this rule matches all destinations.
// +optional
To []NetworkPolicyPeer `json:"to"`
// Rule is matched if traffic is intended for a Service listed in this field.
// Currently only ClusterIP types Services are supported in this field. This field
// can only be used when AntreaProxy is enabled. This field can't be used with To
// or Ports. If this field and To are both empty or missing, this rule matches all
// destinations.
// +optional
ToServices []ServiceReference `json:"toServices,omitempty"`
// Name describes the intention of this rule.
// Name should be unique within the policy.
// +optional
Name string `json:"name"`
// EnableLogging is used to indicate if agent should generate logs
	// when rules are matched. Defaults to false.
EnableLogging bool `json:"enableLogging"`
	// Select workloads to which this rule will be applied. Cannot be set in
// conjunction with NetworkPolicySpec/ClusterNetworkPolicySpec.AppliedTo.
// +optional
AppliedTo []NetworkPolicyPeer `json:"appliedTo,omitempty"`
}
// NetworkPolicyPeer describes the grouping selector of workloads.
type NetworkPolicyPeer struct {
	// IPBlock describes the IPAddresses/IPBlocks that are matched in to/from.
// IPBlock cannot be set as part of the AppliedTo field.
// Cannot be set with any other selector.
// +optional
IPBlock *IPBlock `json:"ipBlock,omitempty"`
// Select Pods from NetworkPolicy's Namespace as workloads in
// AppliedTo/To/From fields. If set with NamespaceSelector, Pods are
// matched from Namespaces matched by the NamespaceSelector.
// Cannot be set with any other selector except NamespaceSelector.
// +optional
PodSelector *metav1.LabelSelector `json:"podSelector,omitempty"`
// Select all Pods from Namespaces matched by this selector, as
// workloads in To/From fields. If set with PodSelector,
// Pods are matched from Namespaces matched by the NamespaceSelector.
// Cannot be set with any other selector except PodSelector or
// ExternalEntitySelector. Cannot be set with Namespaces.
// +optional
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
	// Select Pod/ExternalEntity from Namespaces matched by specific criteria.
	// Currently the only supported criterion is match: Self, which selects from the same
// Namespace of the appliedTo workloads.
// Cannot be set with any other selector except PodSelector or
// ExternalEntitySelector. This field can only be set when NetworkPolicyPeer
// is created for ClusterNetworkPolicy ingress/egress rules.
// Cannot be set with NamespaceSelector.
// +optional
Namespaces *PeerNamespaces `json:"namespaces,omitempty"`
// Select ExternalEntities from NetworkPolicy's Namespace as workloads
// in AppliedTo/To/From fields. If set with NamespaceSelector,
// ExternalEntities are matched from Namespaces matched by the
// NamespaceSelector.
// Cannot be set with any other selector except NamespaceSelector.
// +optional
ExternalEntitySelector *metav1.LabelSelector `json:"externalEntitySelector,omitempty"`
// Group is the name of the ClusterGroup which can be set as an
// AppliedTo or within an Ingress or Egress rule in place of
// a stand-alone selector. A Group cannot be set with any other
// selector.
Group string `json:"group,omitempty"`
// Restrict egress access to the Fully Qualified Domain Names prescribed
// by name or by wildcard match patterns. This field can only be set for
// NetworkPolicyPeer of egress rules.
// Supported formats are:
	// Exact FQDNs, e.g. "google.com", "db-svc.default.svc.cluster.local"
	// Wildcard expressions, e.g. "*wayfair.com".
FQDN string `json:"fqdn,omitempty"`
}
type PeerNamespaces struct {
Match NamespaceMatchType `json:"match,omitempty"`
}
// NamespaceMatchType describes Namespace matching strategy.
type NamespaceMatchType string
const (
NamespaceMatchSelf NamespaceMatchType = "Self"
)
// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed
// or denied to/from the workloads matched by a Spec.AppliedTo.
type IPBlock struct {
// CIDR is a string representing the IP Block
// Valid examples are "192.168.1.1/24".
CIDR string `json:"cidr"`
}
// NetworkPolicyPort describes the port and protocol to match in a rule.
type NetworkPolicyPort struct {
// The protocol (TCP, UDP, or SCTP) which traffic must match.
// If not specified, this field defaults to TCP.
// +optional
Protocol *v1.Protocol `json:"protocol,omitempty"`
// The port on the given protocol. This can be either a numerical
// or named port on a Pod. If this field is not provided, this
// matches all port names and numbers.
// +optional
Port *intstr.IntOrString `json:"port,omitempty"`
	// EndPort defines the end of the port range; the end port is included in the range.
// It can only be specified when a numerical `port` is specified.
// +optional
EndPort *int32 `json:"endPort,omitempty"`
}
// ServiceReference represents a reference to a v1.Service.
type ServiceReference struct {
// Name of the Service
Name string `json:"name"`
// Namespace of the Service
Namespace string `json:"namespace,omitempty"`
}
// RuleAction describes the action to be applied on traffic matching a rule.
type RuleAction string
const (
// RuleActionAllow describes that the traffic matching the rule must be allowed.
RuleActionAllow RuleAction = "Allow"
// RuleActionDrop describes that the traffic matching the rule must be dropped.
RuleActionDrop RuleAction = "Drop"
	// RuleActionPass indicates that the traffic matching the rule will not be evaluated
	// by Antrea NetworkPolicy or ClusterNetworkPolicy, but rather punted to K8s namespaced
	// NetworkPolicy for evaluation.
RuleActionPass RuleAction = "Pass"
// RuleActionReject indicates that the traffic matching the rule must be rejected and the
// client will receive a response.
RuleActionReject RuleAction = "Reject"
)
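// exampleIngressRule is a hypothetical helper added purely for illustration and
// is not part of the original API definitions: it builds a minimal ingress Rule
// that allows TCP traffic on ports 8080-8090 from Pods labelled app=client, to
// show how Action, Ports and From compose. The label, port range and action are
// assumptions chosen only for this sketch.
func exampleIngressRule() Rule {
	allow := RuleActionAllow
	tcp := v1.ProtocolTCP
	port := intstr.FromInt(8080)
	endPort := int32(8090)
	return Rule{
		Action: &allow,
		Ports: []NetworkPolicyPort{
			{Protocol: &tcp, Port: &port, EndPort: &endPort},
		},
		From: []NetworkPolicyPeer{
			{PodSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "client"},
			}},
		},
	}
}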
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type NetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
Items []NetworkPolicy `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ClusterNetworkPolicy struct {
metav1.TypeMeta `json:",inline"`
// Standard metadata of the object.
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of ClusterNetworkPolicy.
Spec ClusterNetworkPolicySpec `json:"spec"`
// Most recently observed status of the NetworkPolicy.
Status NetworkPolicyStatus `json:"status"`
}
// ClusterNetworkPolicySpec defines the desired state for ClusterNetworkPolicy.
type ClusterNetworkPolicySpec struct {
	// Tier specifies the tier to which this ClusterNetworkPolicy belongs.
// The ClusterNetworkPolicy order will be determined based on the
// combination of the Tier's Priority and the ClusterNetworkPolicy's own
// Priority. If not specified, this policy will be created in the Application
// Tier right above the K8s NetworkPolicy which resides at the bottom.
Tier string `json:"tier,omitempty"`
	// Priority specifies the order of the ClusterNetworkPolicy relative to
// other AntreaClusterNetworkPolicies.
Priority float64 `json:"priority"`
	// Select workloads to which the rules will be applied. Cannot be set in
// conjunction with AppliedTo in each rule.
// +optional
AppliedTo []NetworkPolicyPeer `json:"appliedTo,omitempty"`
// Set of ingress rules evaluated based on the order in which they are set.
// Currently Ingress rule supports setting the `From` field but not the `To`
// field within a Rule.
// +optional
Ingress []Rule `json:"ingress"`
// Set of egress rules evaluated based on the order in which they are set.
// Currently Egress rule supports setting the `To` field but not the `From`
// field within a Rule.
// +optional
Egress []Rule `json:"egress"`
}
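// exampleFQDNEgressSpec is a hypothetical helper added purely for illustration and
// is not part of the original API definitions: it sketches a ClusterNetworkPolicySpec
// with one egress rule dropping traffic to a single exact FQDN, applied to all Pods
// in Namespaces labelled env=prod. The tier name, priority, labels and domain are
// assumptions chosen only for this sketch.
func exampleFQDNEgressSpec() ClusterNetworkPolicySpec {
	drop := RuleActionDrop
	return ClusterNetworkPolicySpec{
		Tier:     "securityops",
		Priority: 5,
		AppliedTo: []NetworkPolicyPeer{
			{NamespaceSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"env": "prod"},
			}},
		},
		Egress: []Rule{
			{
				Action: &drop,
				To:     []NetworkPolicyPeer{{FQDN: "db-svc.default.svc.cluster.local"}},
			},
		},
	}
}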
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ClusterNetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterNetworkPolicy `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Tier struct {
metav1.TypeMeta `json:",inline"`
// Standard metadata of the object.
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of Tier.
Spec TierSpec `json:"spec"`
}
// TierSpec defines the desired state for Tier.
type TierSpec struct {
	// Priority specifies the order of the Tier relative to other Tiers.
Priority int32 `json:"priority"`
// Description is an optional field to add more information regarding
// the purpose of this Tier.
Description string `json:"description,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TierList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
Items []Tier `json:"items"`
}
| 1 | 49,149 | Will we be adding this to `AppliedTo` as well? If not, any reason why not? | antrea-io-antrea | go |
@@ -813,6 +813,13 @@ Tries to force this object to take the focus.
"""
return False
+ def shouldAcceptShowHideCaretEvent(self):
+ """Some objects/applications send show/hide caret events when we don't expect it, such as when the cursor is blinking.
+ @return: if show/hide caret events should be accepted for this object.
+ @rtype: Boolean
+ """
+ return True
+
def reportFocus(self):
"""Announces this object in a way suitable such that it gained focus.
""" | 1 | # -*- coding: UTF-8 -*-
#NVDAObjects/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2017 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Patrick Zajda, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""Module that contains the base NVDA object type"""
from new import instancemethod
import time
import re
import weakref
from logHandler import log
import review
import eventHandler
from displayModel import DisplayModelTextInfo
import baseObject
import speech
import api
import textInfos.offsets
import config
import controlTypes
import appModuleHandler
import treeInterceptorHandler
import braille
import globalPluginHandler
class NVDAObjectTextInfo(textInfos.offsets.OffsetsTextInfo):
"""A default TextInfo which is used to enable text review of information about widgets that don't support text content.
The L{NVDAObject.basicText} attribute is used as the text to expose.
"""
locationText=None
def _get_unit_mouseChunk(self):
return textInfos.UNIT_STORY
def _getStoryText(self):
return self.obj.basicText
def _getStoryLength(self):
return len(self._getStoryText())
def _getTextRange(self,start,end):
text=self._getStoryText()
return text[start:end]
class InvalidNVDAObject(RuntimeError):
"""Raised by NVDAObjects during construction to inform that this object is invalid.
In this case, for the purposes of NVDA, the object should be considered non-existent.
Therefore, L{DynamicNVDAObjectType} will return C{None} if this exception is raised.
"""
class DynamicNVDAObjectType(baseObject.ScriptableObject.__class__):
_dynamicClassCache={}
def __call__(self,chooseBestAPI=True,**kwargs):
if chooseBestAPI:
APIClass=self.findBestAPIClass(kwargs)
if not APIClass: return None
else:
APIClass=self
# Instantiate the requested class.
try:
obj=APIClass.__new__(APIClass,**kwargs)
obj.APIClass=APIClass
if isinstance(obj,self):
obj.__init__(**kwargs)
except InvalidNVDAObject, e:
log.debugWarning("Invalid NVDAObject: %s" % e, stack_info=True)
return None
clsList = []
if "findOverlayClasses" in APIClass.__dict__:
obj.findOverlayClasses(clsList)
else:
clsList.append(APIClass)
# Allow app modules to choose overlay classes.
appModule=obj.appModule
# optimisation: The base implementation of chooseNVDAObjectOverlayClasses does nothing,
# so only call this method if it's been overridden.
if appModule and not hasattr(appModule.chooseNVDAObjectOverlayClasses, "_isBase"):
appModule.chooseNVDAObjectOverlayClasses(obj, clsList)
# Allow global plugins to choose overlay classes.
for plugin in globalPluginHandler.runningPlugins:
if "chooseNVDAObjectOverlayClasses" in plugin.__class__.__dict__:
plugin.chooseNVDAObjectOverlayClasses(obj, clsList)
# Determine the bases for the new class.
bases=[]
for index in xrange(len(clsList)):
# A class doesn't need to be a base if it is already implicitly included by being a superclass of a previous base.
if index==0 or not issubclass(clsList[index-1],clsList[index]):
bases.append(clsList[index])
# Construct the new class.
if len(bases) == 1:
# We only have one base, so there's no point in creating a dynamic type.
newCls=bases[0]
else:
bases=tuple(bases)
newCls=self._dynamicClassCache.get(bases,None)
if not newCls:
name="Dynamic_%s"%"".join([x.__name__ for x in clsList])
newCls=type(name,bases,{})
self._dynamicClassCache[bases]=newCls
oldMro=frozenset(obj.__class__.__mro__)
# Mutate obj into the new class.
obj.__class__=newCls
# Initialise the overlay classes.
for cls in reversed(newCls.__mro__):
if cls in oldMro:
# This class was part of the initially constructed object, so its constructor would have been called.
continue
initFunc=cls.__dict__.get("initOverlayClass")
if initFunc:
initFunc(obj)
# Bind gestures specified on the class.
try:
obj.bindGestures(getattr(cls, "_%s__gestures" % cls.__name__))
except AttributeError:
pass
# Allow app modules to make minor tweaks to the instance.
if appModule and hasattr(appModule,"event_NVDAObject_init"):
appModule.event_NVDAObject_init(obj)
return obj
@classmethod
def clearDynamicClassCache(cls):
"""Clear the dynamic class cache.
This should be called when a plugin is unloaded so that any used overlay classes in the unloaded plugin can be garbage collected.
"""
cls._dynamicClassCache.clear()
class NVDAObject(baseObject.ScriptableObject):
"""NVDA's representation of a single control/widget.
Every widget, regardless of how it is exposed by an application or the operating system, is represented by a single NVDAObject instance.
This allows NVDA to work with all widgets in a uniform way.
An NVDAObject provides information about the widget (e.g. its name, role and value),
as well as functionality to manipulate it (e.g. perform an action or set focus).
Events for the widget are handled by special event methods on the object.
Commands triggered by input from the user can also be handled by special methods called scripts.
See L{ScriptableObject} for more details.
The only attribute that absolutely must be provided is L{processID}.
However, subclasses should provide at least the L{name} and L{role} attributes in order for the object to be meaningful to the user.
Attributes such as L{parent}, L{firstChild}, L{next} and L{previous} link an instance to other NVDAObjects in the hierarchy.
In order to facilitate access to text exposed by a widget which supports text content (e.g. an editable text control),
a L{textInfos.TextInfo} should be implemented and the L{TextInfo} attribute should specify this class.
There are two main types of NVDAObject classes:
* API classes, which provide the core functionality to work with objects exposed using a particular API (e.g. MSAA/IAccessible).
* Overlay classes, which supplement the core functionality provided by an API class to handle a specific widget or type of widget.
Most developers need only be concerned with overlay classes.
The overlay classes to be used for an instance are determined using the L{findOverlayClasses} method on the API class.
An L{AppModule} can also choose overlay classes for an instance using the L{AppModule.chooseNVDAObjectOverlayClasses} method.
"""
__metaclass__=DynamicNVDAObjectType
cachePropertiesByDefault = True
#: The TextInfo class this object should use to provide access to text.
#: @type: type; L{textInfos.TextInfo}
TextInfo=NVDAObjectTextInfo
@classmethod
def findBestAPIClass(cls,kwargs,relation=None):
"""
Finds out the highest-level APIClass this object can get to given these kwargs, and updates the kwargs and returns the APIClass.
@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
		@type relation: string
@param kwargs: the arguments necessary to construct an object of the class this method was called on.
@type kwargs: dictionary
@returns: the new APIClass
@rtype: DynamicNVDAObjectType
"""
newAPIClass=cls
if 'getPossibleAPIClasses' in newAPIClass.__dict__:
for possibleAPIClass in newAPIClass.getPossibleAPIClasses(kwargs,relation=relation):
if 'kwargsFromSuper' not in possibleAPIClass.__dict__:
log.error("possible API class %s does not implement kwargsFromSuper"%possibleAPIClass)
continue
if possibleAPIClass.kwargsFromSuper(kwargs,relation=relation):
return possibleAPIClass.findBestAPIClass(kwargs,relation=relation)
return newAPIClass if newAPIClass is not NVDAObject else None
@classmethod
def getPossibleAPIClasses(cls,kwargs,relation=None):
"""
Provides a generator which can generate all the possible API classes (in priority order) that inherit directly from the class it was called on.
@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
		@type relation: string
@param kwargs: the arguments necessary to construct an object of the class this method was called on.
@type kwargs: dictionary
@returns: a generator
@rtype: generator
"""
import NVDAObjects.window
yield NVDAObjects.window.Window
@classmethod
def kwargsFromSuper(cls,kwargs,relation=None):
"""
		Finds out if this class can be instantiated from the given super kwargs.
		If so, it updates the kwargs to contain everything it will need to instantiate this class, and returns True.
		If this class can not be instantiated, it returns False and kwargs is not touched.
		@param relation: why is this class being instantiated? parent, focus, foreground etc...
@type relation: string
@param kwargs: the kwargs for constructing this class's super class.
@type kwargs: dict
@rtype: boolean
"""
raise NotImplementedError
def findOverlayClasses(self, clsList):
"""Chooses overlay classes which should be added to this object's class structure after the object has been initially instantiated.
After an NVDAObject class (normally an API-level class) is instantiated, this method is called on the instance to choose appropriate overlay classes.
This method may use properties, etc. on the instance to make this choice.
The object's class structure is then mutated to contain these classes.
L{initOverlayClass} is then called for each class which was not part of the initially instantiated object.
This process allows an NVDAObject to be dynamically created using the most appropriate NVDAObject subclass at each API level.
Classes should be listed with subclasses first. That is, subclasses should generally call super and then append their own classes to the list.
		For example: called on an IAccessible NVDAObject, the list might contain DialogIaccessible (a subclass of IAccessible) and Edit (a subclass of Window).
@param clsList: The list of classes, which will be modified by this method if appropriate.
@type clsList: list of L{NVDAObject}
"""
clsList.append(NVDAObject)
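	# Illustrative sketch (not part of the original file): an app module or global
	# plugin can also influence this list by implementing
	# chooseNVDAObjectOverlayClasses, for example:
	#   def chooseNVDAObjectOverlayClasses(self, obj, clsList):
	#       if obj.role == controlTypes.ROLE_EDITABLETEXT:
	#           clsList.insert(0, MyEnhancedEdit)
	# MyEnhancedEdit is a hypothetical overlay class used purely for illustration.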
beTransparentToMouse=False #:If true then NVDA will never consider the mouse to be on this object, rather it will be on an ancestor.
@staticmethod
def objectFromPoint(x,y):
"""Retreaves an NVDAObject instance representing a control in the Operating System at the given x and y coordinates.
@param x: the x coordinate.
@type x: int
@param y: the y coordinate.
		@type y: int
@return: The object at the given x and y coordinates.
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation=(x,y))
return APIClass(chooseBestAPI=False,**kwargs) if APIClass else None
@staticmethod
def objectWithFocus():
"""Retreaves the object representing the control currently with focus in the Operating System. This differens from NVDA's focus object as this focus object is the real focus object according to the Operating System, not according to NVDA.
@return: the object with focus.
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation="focus")
if not APIClass:
return None
obj=APIClass(chooseBestAPI=False,**kwargs)
if not obj:
return None
focusRedirect=obj.focusRedirect
if focusRedirect:
obj=focusRedirect
return obj
@staticmethod
def objectInForeground():
"""Retreaves the object representing the current foreground control according to the Operating System. This differes from NVDA's foreground object as this object is the real foreground object according to the Operating System, not according to NVDA.
@return: the foreground object
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation="foreground")
return APIClass(chooseBestAPI=False,**kwargs) if APIClass else None
def __init__(self):
super(NVDAObject,self).__init__()
self._mouseEntered=False #:True if the mouse has entered this object (for use in L{event_mouseMoved})
self.textRepresentationLineLength=None #:If an integer greater than 0 then lines of text in this object are always this long.
def _isEqual(self,other):
"""Calculates if this object is equal to another object. Used by L{NVDAObject.__eq__}.
@param other: the other object to compare with.
@type other: L{NVDAObject}
@return: True if equal, false otherwise.
@rtype: boolean
"""
return True
def __eq__(self,other):
"""Compaires the objects' memory addresses, their type, and uses L{NVDAObject._isEqual} to see if they are equal.
"""
if self is other:
return True
if type(self) is not type(other):
return False
return self._isEqual(other)
def __ne__(self,other):
"""The opposite to L{NVDAObject.__eq__}
"""
return not self.__eq__(other)
	focusRedirect=None #: Another object which should be treated as the focus if focus is ever given to this object.
def _get_treeInterceptorClass(self):
"""
If this NVDAObject should use a treeInterceptor, then this property provides the L{treeInterceptorHandler.TreeInterceptor} class it should use.
		If not, then it should not be implemented.
"""
raise NotImplementedError
#: Whether to create a tree interceptor for this object.
#: This is only relevant if L{treeInterceptorClass} is valid.
#: Normally, this should be C{True}.
#: However, for some objects (e.g. ARIA applications), a tree interceptor shouldn't be used by default,
#: but the user may wish to override this.
#: In this case, this can be set to C{False} and updated later.
#: @type: bool
shouldCreateTreeInterceptor = True
def _get_treeInterceptor(self):
"""Retreaves the treeInterceptor associated with this object.
If a treeInterceptor has not been specifically set, the L{treeInterceptorHandler} is asked if it can find a treeInterceptor containing this object.
@return: the treeInterceptor
@rtype: L{treeInterceptorHandler.TreeInterceptor}
"""
if hasattr(self,'_treeInterceptor'):
ti=self._treeInterceptor
if isinstance(ti,weakref.ref):
ti=ti()
if ti and ti in treeInterceptorHandler.runningTable:
return ti
else:
self._treeInterceptor=None
return None
else:
ti=treeInterceptorHandler.getTreeInterceptor(self)
if ti:
self._treeInterceptor=weakref.ref(ti)
return ti
def _set_treeInterceptor(self,obj):
"""Specifically sets a treeInterceptor to be associated with this object.
"""
if obj:
self._treeInterceptor=weakref.ref(obj)
else: #We can't point a weakref to None, so just set the private variable to None, it can handle that
self._treeInterceptor=None
def _get_appModule(self):
"""Retreaves the appModule representing the application this object is a part of by asking L{appModuleHandler}.
@return: the appModule
@rtype: L{appModuleHandler.AppModule}
"""
if not hasattr(self,'_appModuleRef'):
a=appModuleHandler.getAppModuleForNVDAObject(self)
if a:
self._appModuleRef=weakref.ref(a)
return a
else:
return self._appModuleRef()
def _get_name(self):
"""The name or label of this object (example: the text of a button).
@rtype: basestring
"""
return ""
def _get_role(self):
"""The role or type of control this object represents (example: button, list, dialog).
@return: a ROLE_* constant from L{controlTypes}
@rtype: int
"""
return controlTypes.ROLE_UNKNOWN
def _get_roleText(self):
"""
A custom role string for this object, which is used for braille and speech presentation, which will override the standard label for this object's role property.
No string is provided by default, meaning that NVDA will fall back to using role.
Examples of where this property might be overridden are shapes in Powerpoint, or ARIA role descriptions.
"""
return None
def _get_value(self):
"""The value of this object (example: the current percentage of a scrollbar, the selected option in a combo box).
@rtype: basestring
"""
return ""
def _get_description(self):
"""The description or help text of this object.
@rtype: basestring
"""
return ""
def _get_controllerFor(self):
"""Retreaves the object/s that this object controls."""
return []
def _get_actionCount(self):
"""Retreaves the number of actions supported by this object."""
return 0
def getActionName(self,index=None):
"""Retreaves the name of an action supported by this object.
If index is not given then the default action will be used if it exists.
@param index: the optional 0-based index of the wanted action.
@type index: int
@return: the action's name
@rtype: basestring
"""
raise NotImplementedError
def doAction(self,index=None):
"""Performs an action supported by this object.
If index is not given then the default action will be used if it exists.
"""
raise NotImplementedError
def _get_defaultActionIndex(self):
"""Retreaves the index of the action that is the default."""
return 0
def _get_keyboardShortcut(self):
"""The shortcut key that activates this object(example: alt+t).
@rtype: basestring
"""
return ""
def _get_isInForeground(self):
"""
Finds out if this object is currently within the foreground.
"""
raise NotImplementedError
def _get_states(self):
"""Retreaves the current states of this object (example: selected, focused).
@return: a set of STATE_* constants from L{controlTypes}.
@rtype: set of int
"""
return set()
def _get_location(self):
"""The location of this object on the screen.
@return: left, top, width and height of the object.
@rtype: tuple of int
"""
raise NotImplementedError
def _get_locationText(self):
"""A message that explains the location of the object in friendly terms."""
location=self.location
if not location:
return None
(left,top,width,height)=location
deskLocation=api.getDesktopObject().location
(deskLeft,deskTop,deskWidth,deskHeight)=deskLocation
percentFromLeft=(float(left-deskLeft)/deskWidth)*100
percentFromTop=(float(top-deskTop)/deskHeight)*100
percentWidth=(float(width)/deskWidth)*100
percentHeight=(float(height)/deskHeight)*100
# Translators: Reports navigator object's dimensions (example output: object edges positioned 20 per cent from left edge of screen, 10 per cent from top edge of screen, width is 40 per cent of screen, height is 50 per cent of screen).
return _("Object edges positioned {left:.1f} per cent from left edge of screen, {top:.1f} per cent from top edge of screen, width is {width:.1f} per cent of screen, height is {height:.1f} per cent of screen").format(left=percentFromLeft,top=percentFromTop,width=percentWidth,height=percentHeight)
def _get_parent(self):
"""Retreaves this object's parent (the object that contains this object).
@return: the parent object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_container(self):
"""
		Exactly like parent, however another object at this same sibling level may be retrieved first (e.g. a groupbox). Mostly used when presenting context such as focus ancestry.
"""
# Cache parent.
parent = self.parent
self.parent = parent
return parent
def _get_next(self):
"""Retreaves the object directly after this object with the same parent.
@return: the next object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_previous(self):
"""Retreaves the object directly before this object with the same parent.
@return: the previous object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_firstChild(self):
"""Retreaves the first object that this object contains.
@return: the first child object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_lastChild(self):
"""Retreaves the last object that this object contains.
@return: the last child object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_children(self):
"""Retreaves a list of all the objects directly contained by this object (who's parent is this object).
@rtype: list of L{NVDAObject}
"""
children=[]
child=self.firstChild
while child:
children.append(child)
child=child.next
return children
def getChild(self, index):
"""Retrieve a child by index.
@note: Subclasses may override this if they have an efficient way to retrieve a single, arbitrary child.
The base implementation uses L{children}.
@param index: The 0-based index of the child to retrieve.
@type index: int
@return: The child.
@rtype: L{NVDAObject}
"""
return self.children[index]
def _get_rowNumber(self):
"""Retreaves the row number of this object if it is in a table.
@rtype: int
"""
raise NotImplementedError
def _get_columnNumber(self):
"""Retreaves the column number of this object if it is in a table.
@rtype: int
"""
raise NotImplementedError
def _get_cellCoordsText(self):
"""
An alternative text representation of cell coordinates e.g. "a1". Will override presentation of rowNumber and columnNumber.
Only implement if the representation is really different.
"""
return None
def _get_rowCount(self):
"""Retreaves the number of rows this object contains if its a table.
@rtype: int
"""
raise NotImplementedError
def _get_columnCount(self):
"""Retreaves the number of columns this object contains if its a table.
@rtype: int
"""
raise NotImplementedError
def _get_rowHeaderText(self):
"""The text of the row headers for this cell.
@rtype: str
"""
raise NotImplementedError
def _get_columnHeaderText(self):
"""The text of the column headers for this cell.
@rtype: str
"""
raise NotImplementedError
def _get_table(self):
"""Retreaves the object that represents the table that this object is contained in, if this object is a table cell.
@rtype: L{NVDAObject}
"""
raise NotImplementedError
def _get_tableID(self):
"""The identifier of the table associated with this object if it is a table cell.
This identifier must distinguish this table from other tables.
If this is not implemented, table cell information will still be reported,
but row and column information will always be reported
even if the user moves to a cell in the same row/column.
"""
raise NotImplementedError
def _get_recursiveDescendants(self):
"""Recursively traverse and return the descendants of this object.
This is a depth-first forward traversal.
@return: The recursive descendants of this object.
@rtype: generator of L{NVDAObject}
"""
for child in self.children:
yield child
for recursiveChild in child.recursiveDescendants:
yield recursiveChild
presType_unavailable="unavailable"
presType_layout="layout"
presType_content="content"
def _get_presentationType(self):
states=self.states
if controlTypes.STATE_INVISIBLE in states or controlTypes.STATE_UNAVAILABLE in states:
return self.presType_unavailable
role=self.role
		#Static text should be content only if it really has usable text
if role==controlTypes.ROLE_STATICTEXT:
text=self.makeTextInfo(textInfos.POSITION_ALL).text
return self.presType_content if text and not text.isspace() else self.presType_layout
if role in (controlTypes.ROLE_UNKNOWN, controlTypes.ROLE_PANE, controlTypes.ROLE_TEXTFRAME, controlTypes.ROLE_ROOTPANE, controlTypes.ROLE_LAYEREDPANE, controlTypes.ROLE_SCROLLPANE, controlTypes.ROLE_SPLITPANE, controlTypes.ROLE_SECTION, controlTypes.ROLE_PARAGRAPH, controlTypes.ROLE_TITLEBAR, controlTypes.ROLE_LABEL, controlTypes.ROLE_WHITESPACE,controlTypes.ROLE_BORDER):
return self.presType_layout
name = self.name
description = self.description
if not name and not description:
if role in (controlTypes.ROLE_WINDOW,controlTypes.ROLE_PANEL, controlTypes.ROLE_PROPERTYPAGE, controlTypes.ROLE_TEXTFRAME, controlTypes.ROLE_GROUPING,controlTypes.ROLE_OPTIONPANE,controlTypes.ROLE_INTERNALFRAME,controlTypes.ROLE_FORM,controlTypes.ROLE_TABLEBODY):
return self.presType_layout
if role == controlTypes.ROLE_TABLE and not config.conf["documentFormatting"]["reportTables"]:
return self.presType_layout
if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN,controlTypes.ROLE_TABLECELL) and (not config.conf["documentFormatting"]["reportTables"] or not config.conf["documentFormatting"]["reportTableCellCoords"]):
return self.presType_layout
if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN):
try:
table=self.table
except NotImplementedError:
table=None
if table:
# This is part of a real table, so the cells will report row/column information.
# Therefore, this object is just for layout.
return self.presType_layout
return self.presType_content
def _get_simpleParent(self):
obj=self.parent
while obj and obj.presentationType!=self.presType_content:
obj=obj.parent
return obj
def _findSimpleNext(self,useChild=False,useParent=True,goPrevious=False):
nextPrevAttrib="next" if not goPrevious else "previous"
firstLastChildAttrib="firstChild" if not goPrevious else "lastChild"
found=None
if useChild:
child=getattr(self,firstLastChildAttrib)
childPresType=child.presentationType if child else None
if childPresType==self.presType_content:
found=child
elif childPresType==self.presType_layout:
found=child._findSimpleNext(useChild=True,useParent=False,goPrevious=goPrevious)
elif child:
found=child._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if found:
return found
next=getattr(self,nextPrevAttrib)
nextPresType=next.presentationType if next else None
if nextPresType==self.presType_content:
found=next
elif nextPresType==self.presType_layout:
found=next._findSimpleNext(useChild=True,useParent=False,goPrevious=goPrevious)
elif next:
found=next._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if found:
return found
parent=self.parent if useParent else None
while parent and parent.presentationType!=self.presType_content:
next=parent._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if next:
return next
parent=parent.parent
def _get_simpleNext(self):
return self._findSimpleNext()
def _get_simplePrevious(self):
return self._findSimpleNext(goPrevious=True)
def _get_simpleFirstChild(self):
child=self.firstChild
if not child:
return None
presType=child.presentationType
if presType!=self.presType_content: return child._findSimpleNext(useChild=(presType!=self.presType_unavailable),useParent=False)
return child
def _get_simpleLastChild(self):
child=self.lastChild
if not child:
return None
presType=child.presentationType
if presType!=self.presType_content: return child._findSimpleNext(useChild=(presType!=self.presType_unavailable),useParent=False,goPrevious=True)
return child
def _get_childCount(self):
"""Retreaves the number of children this object contains.
@rtype: int
"""
return len(self.children)
def _get_activeChild(self):
"""Retreaves the child of this object that currently has, or contains, the focus.
@return: the active child if it has one else None
@rtype: L{NVDAObject} or None
"""
return None
def _get_isFocusable(self):
"""Whether this object is focusable.
@rtype: bool
"""
return controlTypes.STATE_FOCUSABLE in self.states
def _get_hasFocus(self):
"""Whether this object has focus.
@rtype: bool
"""
return controlTypes.STATE_FOCUSED in self.states
def setFocus(self):
"""
Tries to force this object to take the focus.
"""
pass
def scrollIntoView(self):
"""Scroll this object into view on the screen if possible.
"""
raise NotImplementedError
def _get_labeledBy(self):
"""Retreaves the object that this object is labeled by (example: the static text label beside an edit field).
@return: the label object if it has one else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_positionInfo(self):
"""Retreaves position information for this object such as its level, its index with in a group, and the number of items in that group.
@return: a dictionary containing any of level, groupIndex and similarItemsInGroup.
@rtype: dict
"""
return {}
def _get_processID(self):
"""Retreaves an identifyer of the process this object is a part of.
@rtype: int
"""
raise NotImplementedError
def _get_isProtected(self):
"""
@return: True if this object is protected (hides its input for passwords), or false otherwise
@rtype: boolean
"""
return False
def _get_indexInParent(self):
"""The index of this object in its parent object.
@return: The 0 based index, C{None} if there is no parent.
@rtype: int
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_flowsTo(self):
"""The object to which content flows from this object.
@return: The object to which this object flows, C{None} if none.
@rtype: L{NVDAObject}
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_flowsFrom(self):
"""The object from which content flows to this object.
@return: The object from which this object flows, C{None} if none.
@rtype: L{NVDAObject}
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_isPresentableFocusAncestor(self):
"""Determine if this object should be presented to the user in the focus ancestry.
@return: C{True} if it should be presented in the focus ancestry, C{False} if not.
@rtype: bool
"""
if self.presentationType == self.presType_layout:
return False
if self.role in (controlTypes.ROLE_TREEVIEWITEM, controlTypes.ROLE_LISTITEM, controlTypes.ROLE_PROGRESSBAR, controlTypes.ROLE_EDITABLETEXT):
return False
return True
def _get_statusBar(self):
"""Finds the closest status bar in relation to this object.
@return: the found status bar else None
@rtype: L{NVDAObject} or None
"""
return None
def _get_isCurrent(self):
"""Gets the value that indicates whether this object is the current element in a set of related
elements. This maps to aria-current. Normally returns False. If this object is current
it will return one of the following values: True, "page", "step", "location", "date", "time"
"""
return False
def reportFocus(self):
"""Announces this object in a way suitable such that it gained focus.
"""
speech.speakObject(self,reason=controlTypes.REASON_FOCUS)
def _reportErrorInPreviousWord(self):
try:
# self might be a descendant of the text control; e.g. Symphony.
# We want to deal with the entire text, so use the caret object.
info = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
# This gets called for characters which might end a word; e.g. space.
# The character before the caret is the word end.
# The one before that is the last of the word, which is what we want.
info.move(textInfos.UNIT_CHARACTER, -2)
info.expand(textInfos.UNIT_CHARACTER)
fields = info.getTextWithFields()
except RuntimeError:
return
except:
# Focus probably moved.
log.debugWarning("Error fetching last character of previous word", exc_info=True)
return
for command in fields:
if isinstance(command, textInfos.FieldCommand) and command.command == "formatChange" and command.field.get("invalid-spelling"):
break
else:
# No error.
return
import nvwave
nvwave.playWaveFile(r"waves\textError.wav")
def event_typedCharacter(self,ch):
if config.conf["documentFormatting"]["reportSpellingErrors"] and config.conf["keyboard"]["alertForSpellingErrors"] and (
# Not alpha, apostrophe or control.
ch.isspace() or (ch >= u" " and ch not in u"'\x7f" and not ch.isalpha())
):
# Reporting of spelling errors is enabled and this character ends a word.
self._reportErrorInPreviousWord()
speech.speakTypedCharacters(ch)
import winUser
if config.conf["keyboard"]["beepForLowercaseWithCapslock"] and ch.islower() and winUser.getKeyState(winUser.VK_CAPITAL)&1:
import tones
tones.beep(3000,40)
def event_mouseMove(self,x,y):
if not self._mouseEntered and config.conf['mouse']['reportObjectRoleOnMouseEnter']:
speech.cancelSpeech()
speech.speakObjectProperties(self,role=True)
speechWasCanceled=True
else:
speechWasCanceled=False
self._mouseEntered=True
try:
info=self.makeTextInfo(textInfos.Point(x,y))
except NotImplementedError:
info=NVDAObjectTextInfo(self,textInfos.POSITION_FIRST)
except LookupError:
return
if config.conf["reviewCursor"]["followMouse"]:
api.setReviewPosition(info)
info.expand(info.unit_mouseChunk)
oldInfo=getattr(self,'_lastMouseTextInfoObject',None)
self._lastMouseTextInfoObject=info
if not oldInfo or info.__class__!=oldInfo.__class__ or info.compareEndPoints(oldInfo,"startToStart")!=0 or info.compareEndPoints(oldInfo,"endToEnd")!=0:
text=info.text
notBlank=False
if text:
for ch in text:
if not ch.isspace() and ch!=u'\ufffc':
notBlank=True
if notBlank:
if not speechWasCanceled:
speech.cancelSpeech()
speech.speakText(text)
def event_stateChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self,states=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_focusEntered(self):
if self.role in (controlTypes.ROLE_MENUBAR,controlTypes.ROLE_POPUPMENU,controlTypes.ROLE_MENUITEM):
speech.cancelSpeech()
return
if self.isPresentableFocusAncestor:
speech.speakObject(self,reason=controlTypes.REASON_FOCUSENTERED)
def event_gainFocus(self):
"""
This code is executed if a gain focus event is received by this object.
"""
self.reportFocus()
braille.handler.handleGainFocus(self)
def event_foreground(self):
"""Called when the foreground window changes.
This method should only perform tasks specific to the foreground window changing.
L{event_focusEntered} or L{event_gainFocus} will be called for this object, so this method should not speak/braille the object, etc.
"""
speech.cancelSpeech()
def event_becomeNavigatorObject(self):
"""Called when this object becomes the navigator object.
"""
braille.handler.handleReviewMove()
def event_valueChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, value=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_nameChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, name=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_descriptionChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, description=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_caret(self):
if self is api.getFocusObject() and not eventHandler.isPendingEvents("gainFocus"):
braille.handler.handleCaretMove(self)
review.handleCaretMove(self)
def _get_flatReviewPosition(self):
"""Locates a TextInfo positioned at this object, in the closest flat review."""
parent=self.simpleParent
while parent:
ti=parent.treeInterceptor
if ti and self in ti and ti.rootNVDAObject==parent:
return ti.makeTextInfo(self)
if issubclass(parent.TextInfo,DisplayModelTextInfo):
try:
return parent.makeTextInfo(api.getReviewPosition().pointAtStart)
except (NotImplementedError,LookupError):
pass
try:
return parent.makeTextInfo(self)
except (NotImplementedError,RuntimeError):
pass
return parent.makeTextInfo(textInfos.POSITION_FIRST)
parent=parent.simpleParent
def _get_basicText(self):
newTime=time.time()
oldTime=getattr(self,'_basicTextTime',0)
if newTime-oldTime>0.5:
self._basicText=u" ".join([x for x in self.name, self.value, self.description if isinstance(x, basestring) and len(x) > 0 and not x.isspace()])
if len(self._basicText)==0:
self._basicText=u""
else:
self._basicTextTime=newTime
return self._basicText
def makeTextInfo(self,position):
return self.TextInfo(self,position)
@staticmethod
def _formatLongDevInfoString(string, truncateLen=250):
"""Format a potentially long string value for inclusion in devInfo.
This should be used for arbitrary string values which aren't usually useful in debugging past a certain length.
If the string is too long to be useful, it will be truncated.
This string should be included as returned. There is no need to call repr.
@param string: The string to format.
		@type string: basestring
@param truncateLen: The length at which to truncate the string.
@type truncateLen: int
@return: The formatted string.
@rtype: basestring
"""
if isinstance(string, basestring) and len(string) > truncateLen:
return "%r (truncated)" % string[:truncateLen]
return repr(string)
def _get_devInfo(self):
"""Information about this object useful to developers.
Subclasses may extend this, calling the superclass property first.
@return: A list of text strings providing information about this object useful to developers.
@rtype: list of str
"""
info = []
try:
ret = repr(self.name)
except Exception as e:
ret = "exception: %s" % e
info.append("name: %s" % ret)
try:
ret = self.role
for name, const in controlTypes.__dict__.iteritems():
if name.startswith("ROLE_") and ret == const:
ret = name
break
except Exception as e:
ret = "exception: %s" % e
info.append("role: %s" % ret)
try:
stateConsts = dict((const, name) for name, const in controlTypes.__dict__.iteritems() if name.startswith("STATE_"))
ret = ", ".join(
stateConsts.get(state) or str(state)
for state in self.states)
except Exception as e:
ret = "exception: %s" % e
info.append("states: %s" % ret)
try:
ret = repr(self.isFocusable)
except Exception as e:
ret = "exception: %s" % e
info.append("isFocusable: %s" % ret)
try:
ret = repr(self.hasFocus)
except Exception as e:
ret = "exception: %s" % e
info.append("hasFocus: %s" % ret)
try:
ret = repr(self)
except Exception as e:
ret = "exception: %s" % e
info.append("Python object: %s" % ret)
try:
ret = repr(self.__class__.__mro__)
except Exception as e:
ret = "exception: %s" % e
info.append("Python class mro: %s" % ret)
try:
ret = repr(self.description)
except Exception as e:
ret = "exception: %s" % e
info.append("description: %s" % ret)
try:
ret = repr(self.location)
except Exception as e:
ret = "exception: %s" % e
info.append("location: %s" % ret)
formatLong = self._formatLongDevInfoString
try:
ret = formatLong(self.value)
except Exception as e:
ret = "exception: %s" % e
info.append("value: %s" % ret)
try:
ret = repr(self.appModule)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule: %s" % ret)
try:
ret = repr(self.appModule.productName)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule.productName: %s" % ret)
try:
ret = repr(self.appModule.productVersion)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule.productVersion: %s" % ret)
try:
ret = repr(self.TextInfo)
except Exception as e:
ret = "exception: %s" % e
info.append("TextInfo: %s" % ret)
return info
def _get_sleepMode(self):
"""Whether NVDA should sleep for this object (e.g. it is self-voicing).
If C{True}, all events and script requests for this object are silently dropped.
@rtype: bool
"""
if self.appModule:
return self.appModule.sleepMode
return False
# Don't cache sleepMode, as it is derived from a property which might change
# and we want the changed value immediately.
_cache_sleepMode = False
def _get_mathMl(self):
"""Obtain the MathML markup for an object containing math content.
This will only be called (and thus only needs to be implemented) for
objects with a role of L{controlTypes.ROLE_MATH}.
@raise LookupError: If MathML can't be retrieved for this object.
"""
raise NotImplementedError
#: The language/locale of this object.
#: @type: basestring
language = None
| 1 | 20,043 | Just a note that this feels weird being on the base NVDAObject rather than IAccessible, but right now, I understand that's how it has to be because we fire MSAA caret events on the focus object regardless of whether it's IAccessible. I think we should consider restricting these caret events to focus objects that are IAccessible subclasses in future, but that change is probably too risky for this PR. CC @MichaelDCurran for his thoughts. | nvaccess-nvda | py |
@@ -71,7 +71,9 @@ class PaymentController extends AdminBaseController
{
$paymentEditData = $this->paymentEditDataFactory->createDefault();
- $form = $this->createForm(PaymentEditFormType::class, $paymentEditData);
+ $form = $this->createForm(PaymentEditFormType::class, $paymentEditData, [
+ 'payment_detail' => null,
+ ]);
$form->handleRequest($request);
if ($form->isSubmitted() && $form->isValid()) { | 1 | <?php
namespace Shopsys\FrameworkBundle\Controller\Admin;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route;
use Shopsys\FrameworkBundle\Component\Controller\AdminBaseController;
use Shopsys\FrameworkBundle\Component\Router\Security\Annotation\CsrfProtection;
use Shopsys\FrameworkBundle\Form\Admin\Payment\PaymentEditFormType;
use Shopsys\FrameworkBundle\Model\AdminNavigation\Breadcrumb;
use Shopsys\FrameworkBundle\Model\AdminNavigation\MenuItem;
use Shopsys\FrameworkBundle\Model\Payment\Detail\PaymentDetailFactory;
use Shopsys\FrameworkBundle\Model\Payment\Grid\PaymentGridFactory;
use Shopsys\FrameworkBundle\Model\Payment\PaymentEditDataFactory;
use Shopsys\FrameworkBundle\Model\Payment\PaymentFacade;
use Shopsys\FrameworkBundle\Model\Pricing\Currency\CurrencyFacade;
use Symfony\Component\HttpFoundation\Request;
class PaymentController extends AdminBaseController
{
/**
* @var \Shopsys\FrameworkBundle\Model\AdminNavigation\Breadcrumb
*/
private $breadcrumb;
/**
* @var \Shopsys\FrameworkBundle\Model\Payment\Detail\PaymentDetailFactory
*/
private $paymentDetailFactory;
/**
* @var \Shopsys\FrameworkBundle\Model\Payment\Grid\PaymentGridFactory
*/
private $paymentGridFactory;
/**
* @var \Shopsys\FrameworkBundle\Model\Payment\PaymentEditDataFactory
*/
private $paymentEditDataFactory;
/**
* @var \Shopsys\FrameworkBundle\Model\Payment\PaymentFacade
*/
private $paymentFacade;
/**
* @var \Shopsys\FrameworkBundle\Model\Pricing\Currency\CurrencyFacade
*/
private $currencyFacade;
public function __construct(
PaymentEditDataFactory $paymentEditDataFactory,
CurrencyFacade $currencyFacade,
PaymentFacade $paymentFacade,
PaymentDetailFactory $paymentDetailFactory,
PaymentGridFactory $paymentGridFactory,
Breadcrumb $breadcrumb
) {
$this->paymentEditDataFactory = $paymentEditDataFactory;
$this->currencyFacade = $currencyFacade;
$this->paymentFacade = $paymentFacade;
$this->paymentDetailFactory = $paymentDetailFactory;
$this->paymentGridFactory = $paymentGridFactory;
$this->breadcrumb = $breadcrumb;
}
/**
* @Route("/payment/new/")
* @param \Symfony\Component\HttpFoundation\Request $request
*/
public function newAction(Request $request)
{
$paymentEditData = $this->paymentEditDataFactory->createDefault();
$form = $this->createForm(PaymentEditFormType::class, $paymentEditData);
$form->handleRequest($request);
if ($form->isSubmitted() && $form->isValid()) {
$payment = $this->paymentFacade->create($paymentEditData);
$this->getFlashMessageSender()->addSuccessFlashTwig(
t('Payment <strong><a href="{{ url }}">{{ name }}</a></strong> created'),
[
'name' => $payment->getName(),
'url' => $this->generateUrl('admin_payment_edit', ['id' => $payment->getId()]),
]
);
return $this->redirectToRoute('admin_transportandpayment_list');
}
if ($form->isSubmitted() && !$form->isValid()) {
$this->getFlashMessageSender()->addErrorFlashTwig(t('Please check the correctness of all data filled.'));
}
return $this->render('@ShopsysFramework/Admin/Content/Payment/new.html.twig', [
'form' => $form->createView(),
'currencies' => $this->currencyFacade->getAllIndexedById(),
]);
}
/**
* @Route("/payment/edit/{id}", requirements={"id" = "\d+"})
* @param \Symfony\Component\HttpFoundation\Request $request
* @param int $id
*/
public function editAction(Request $request, $id)
{
$payment = $this->paymentFacade->getById($id);
$paymentEditData = $this->paymentEditDataFactory->createFromPayment($payment);
$form = $this->createForm(PaymentEditFormType::class, $paymentEditData);
$form->handleRequest($request);
if ($form->isSubmitted() && $form->isValid()) {
$this->paymentFacade->edit($payment, $paymentEditData);
$this->getFlashMessageSender()->addSuccessFlashTwig(
t('Payment <strong><a href="{{ url }}">{{ name }}</a></strong> modified'),
[
'name' => $payment->getName(),
'url' => $this->generateUrl('admin_payment_edit', ['id' => $payment->getId()]),
]
);
return $this->redirectToRoute('admin_transportandpayment_list');
}
if ($form->isSubmitted() && !$form->isValid()) {
$this->getFlashMessageSender()->addErrorFlashTwig(t('Please check the correctness of all data filled.'));
}
$this->breadcrumb->overrideLastItem(new MenuItem(t('Editing payment - %name%', ['%name%' => $payment->getName()])));
return $this->render('@ShopsysFramework/Admin/Content/Payment/edit.html.twig', [
'form' => $form->createView(),
'paymentDetail' => $this->paymentDetailFactory->createDetailForPayment($payment),
'currencies' => $this->currencyFacade->getAllIndexedById(),
]);
}
/**
* @Route("/payment/delete/{id}", requirements={"id" = "\d+"})
* @CsrfProtection
* @param int $id
*/
public function deleteAction($id)
{
try {
$paymentName = $this->paymentFacade->getById($id)->getName();
$this->paymentFacade->deleteById($id);
$this->getFlashMessageSender()->addSuccessFlashTwig(
t('Payment <strong>{{ name }}</strong> deleted'),
[
'name' => $paymentName,
]
);
} catch (\Shopsys\FrameworkBundle\Model\Payment\Exception\PaymentNotFoundException $ex) {
$this->getFlashMessageSender()->addErrorFlash(t('Selected payment doesn\'t exist.'));
}
return $this->redirectToRoute('admin_transportandpayment_list');
}
public function listAction()
{
$grid = $this->paymentGridFactory->create();
return $this->render('@ShopsysFramework/Admin/Content/Payment/list.html.twig', [
'gridView' => $grid->createView(),
]);
}
}
| 1 | 9,839 | This change and other similar ones should not be part of this commit. This commit is about adding a ImageUploadType not fixing controllers and stuff. | shopsys-shopsys | php |
@@ -57,6 +57,7 @@ const (
leafNodeConnectEventSubj = "$SYS.ACCOUNT.%s.LEAFNODE.CONNECT" // for internal use only
remoteLatencyEventSubj = "$SYS.LATENCY.M2.%s"
inboxRespSubj = "$SYS._INBOX.%s.%s"
+ accConnzReqSubj = "$SYS.REQ.ACCOUNT.PING.CONNZ"
// FIXME(dlc) - Should account scope, even with wc for now, but later on
// we can then shard as needed. | 1 | // Copyright 2018-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nats-server/v2/server/pse"
)
const (
accLookupReqTokens = 6
accLookupReqSubj = "$SYS.REQ.ACCOUNT.%s.CLAIMS.LOOKUP"
accPackReqSubj = "$SYS.REQ.CLAIMS.PACK"
accListReqSubj = "$SYS.REQ.CLAIMS.LIST"
accClaimsReqSubj = "$SYS.REQ.CLAIMS.UPDATE"
accDeleteReqSubj = "$SYS.REQ.CLAIMS.DELETE"
connectEventSubj = "$SYS.ACCOUNT.%s.CONNECT"
disconnectEventSubj = "$SYS.ACCOUNT.%s.DISCONNECT"
accReqSubj = "$SYS.REQ.ACCOUNT.%s.%s"
// kept for backward compatibility when using http resolver
// this overlaps with the names for events but you'd have to have the operator private key in order to succeed.
accUpdateEventSubjOld = "$SYS.ACCOUNT.%s.CLAIMS.UPDATE"
accUpdateEventSubjNew = "$SYS.REQ.ACCOUNT.%s.CLAIMS.UPDATE"
connsRespSubj = "$SYS._INBOX_.%s"
accConnsEventSubjNew = "$SYS.ACCOUNT.%s.SERVER.CONNS"
accConnsEventSubjOld = "$SYS.SERVER.ACCOUNT.%s.CONNS" // kept for backward compatibility
shutdownEventSubj = "$SYS.SERVER.%s.SHUTDOWN"
authErrorEventSubj = "$SYS.SERVER.%s.CLIENT.AUTH.ERR"
serverStatsSubj = "$SYS.SERVER.%s.STATSZ"
serverDirectReqSubj = "$SYS.REQ.SERVER.%s.%s"
serverPingReqSubj = "$SYS.REQ.SERVER.PING.%s"
serverStatsPingReqSubj = "$SYS.REQ.SERVER.PING" // use $SYS.REQ.SERVER.PING.STATSZ instead
leafNodeConnectEventSubj = "$SYS.ACCOUNT.%s.LEAFNODE.CONNECT" // for internal use only
remoteLatencyEventSubj = "$SYS.LATENCY.M2.%s"
inboxRespSubj = "$SYS._INBOX.%s.%s"
// FIXME(dlc) - Should account scope, even with wc for now, but later on
// we can then shard as needed.
accNumSubsReqSubj = "$SYS.REQ.ACCOUNT.NSUBS"
// These are for exported debug services. These are local to this server only.
accSubsSubj = "$SYS.DEBUG.SUBSCRIBERS"
shutdownEventTokens = 4
serverSubjectIndex = 2
accUpdateTokensNew = 6
accUpdateTokensOld = 5
accUpdateAccIdxOld = 2
accReqTokens = 5
accReqAccIndex = 3
)
// FIXME(dlc) - make configurable.
var eventsHBInterval = 30 * time.Second
// Used to send and receive messages from inside the server.
type internal struct {
account *Account
client *client
seq uint64
sid int
servers map[string]*serverUpdate
sweeper *time.Timer
stmr *time.Timer
replies map[string]msgHandler
sendq chan *pubMsg
resetCh chan struct{}
wg sync.WaitGroup
sq *sendq
orphMax time.Duration
chkOrph time.Duration
statsz time.Duration
cstatsz time.Duration
shash string
inboxPre string
}
// ServerStatsMsg is sent periodically with stats updates.
type ServerStatsMsg struct {
Server ServerInfo `json:"server"`
Stats ServerStats `json:"statsz"`
}
// ConnectEventMsg is sent when a new connection is made that is part of an account.
type ConnectEventMsg struct {
TypedEvent
Server ServerInfo `json:"server"`
Client ClientInfo `json:"client"`
}
// ConnectEventMsgType is the schema type for ConnectEventMsg
const ConnectEventMsgType = "io.nats.server.advisory.v1.client_connect"
// DisconnectEventMsg is sent when a new connection previously defined from a
// ConnectEventMsg is closed.
type DisconnectEventMsg struct {
TypedEvent
Server ServerInfo `json:"server"`
Client ClientInfo `json:"client"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
Reason string `json:"reason"`
}
// DisconnectEventMsgType is the schema type for DisconnectEventMsg
const DisconnectEventMsgType = "io.nats.server.advisory.v1.client_disconnect"
// AccountNumConns is an event that will be sent from a server that is tracking
// a given account when the number of connections changes. It will also HB
// updates in the absence of any changes.
type AccountNumConns struct {
TypedEvent
Server ServerInfo `json:"server"`
Account string `json:"acc"`
Conns int `json:"conns"`
LeafNodes int `json:"leafnodes"`
TotalConns int `json:"total_conns"`
}
const AccountNumConnsMsgType = "io.nats.server.advisory.v1.account_connections"
// accNumConnsReq is sent when we are starting to track an account for the first
// time. We will request others send info to us about their local state.
type accNumConnsReq struct {
Server ServerInfo `json:"server"`
Account string `json:"acc"`
}
// ServerInfo identifies remote servers.
type ServerInfo struct {
Name string `json:"name"`
Host string `json:"host"`
ID string `json:"id"`
Cluster string `json:"cluster,omitempty"`
Domain string `json:"domain,omitempty"`
Version string `json:"ver"`
Seq uint64 `json:"seq"`
JetStream bool `json:"jetstream"`
Time time.Time `json:"time"`
}
// ClientInfo is detailed information about the client forming a connection.
type ClientInfo struct {
Start *time.Time `json:"start,omitempty"`
Host string `json:"host,omitempty"`
ID uint64 `json:"id,omitempty"`
Account string `json:"acc"`
Service string `json:"svc,omitempty"`
User string `json:"user,omitempty"`
Name string `json:"name,omitempty"`
Lang string `json:"lang,omitempty"`
Version string `json:"ver,omitempty"`
RTT time.Duration `json:"rtt,omitempty"`
Server string `json:"server,omitempty"`
Cluster string `json:"cluster,omitempty"`
Stop *time.Time `json:"stop,omitempty"`
Jwt string `json:"jwt,omitempty"`
IssuerKey string `json:"issuer_key,omitempty"`
NameTag string `json:"name_tag,omitempty"`
Tags jwt.TagList `json:"tags,omitempty"`
Kind string `json:"kind,omitempty"`
ClientType string `json:"client_type,omitempty"`
}
// ServerStats hold various statistics that we will periodically send out.
type ServerStats struct {
Start time.Time `json:"start"`
Mem int64 `json:"mem"`
Cores int `json:"cores"`
CPU float64 `json:"cpu"`
Connections int `json:"connections"`
TotalConnections uint64 `json:"total_connections"`
ActiveAccounts int `json:"active_accounts"`
NumSubs uint32 `json:"subscriptions"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
SlowConsumers int64 `json:"slow_consumers"`
Routes []*RouteStat `json:"routes,omitempty"`
Gateways []*GatewayStat `json:"gateways,omitempty"`
ActiveServers int `json:"active_servers,omitempty"`
JetStream *JetStreamVarz `json:"jetstream,omitempty"`
}
// RouteStat holds route statistics.
type RouteStat struct {
ID uint64 `json:"rid"`
Name string `json:"name,omitempty"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
Pending int `json:"pending"`
}
// GatewayStat holds gateway statistics.
type GatewayStat struct {
ID uint64 `json:"gwid"`
Name string `json:"name"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
NumInbound int `json:"inbound_connections"`
}
// DataStats reports how many msgs and bytes. Applicable for both sent and received.
type DataStats struct {
Msgs int64 `json:"msgs"`
Bytes int64 `json:"bytes"`
}
// Used for internally queueing up messages that the server wants to send.
type pubMsg struct {
c *client
sub string
rply string
si *ServerInfo
msg interface{}
last bool
}
// Used to track server updates.
type serverUpdate struct {
seq uint64
ltime time.Time
}
// TypedEvent is a event or advisory sent by the server that has nats type hints
// typically used for events that might be consumed by 3rd party event systems
type TypedEvent struct {
Type string `json:"type"`
ID string `json:"id"`
Time time.Time `json:"timestamp"`
}
// internalSendLoop will be responsible for serializing all messages that
// a server wants to send.
func (s *Server) internalSendLoop(wg *sync.WaitGroup) {
defer wg.Done()
RESET:
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return
}
sysc := s.sys.client
resetCh := s.sys.resetCh
sendq := s.sys.sendq
id := s.info.ID
host := s.info.Host
servername := s.info.Name
domain := s.info.Domain
seqp := &s.sys.seq
js := s.info.JetStream
cluster := s.info.Cluster
if s.gateway.enabled {
cluster = s.getGatewayName()
}
s.mu.Unlock()
// Warn when internal send queue is backed up past 75%
warnThresh := 3 * internalSendQLen / 4
warnFreq := time.Second
last := time.Now().Add(-warnFreq)
for s.eventsRunning() {
// Setup information for next message
if len(sendq) > warnThresh && time.Since(last) >= warnFreq {
s.Warnf("Internal system send queue > 75%%")
last = time.Now()
}
select {
case pm := <-sendq:
if pm.si != nil {
pm.si.Name = servername
pm.si.Domain = domain
pm.si.Host = host
pm.si.Cluster = cluster
pm.si.ID = id
pm.si.Seq = atomic.AddUint64(seqp, 1)
pm.si.Version = VERSION
pm.si.Time = time.Now().UTC()
pm.si.JetStream = js
}
var b []byte
if pm.msg != nil {
switch v := pm.msg.(type) {
case string:
b = []byte(v)
case []byte:
b = v
default:
b, _ = json.Marshal(pm.msg)
}
}
// Setup our client. If the user wants to use a non-system account use our internal
// account scoped here so that we are not changing out accounts for the system client.
var c *client
if pm.c != nil {
c = pm.c
} else {
c = sysc
}
// Grab client lock.
c.mu.Lock()
// Prep internal structures needed to send message.
c.pa.subject = []byte(pm.sub)
c.pa.size = len(b)
c.pa.szb = []byte(strconv.FormatInt(int64(len(b)), 10))
c.pa.reply = []byte(pm.rply)
trace := c.trace
c.mu.Unlock()
// Add in NL
b = append(b, _CRLF_...)
if trace {
c.traceInOp(fmt.Sprintf("PUB %s %s %d", c.pa.subject, c.pa.reply, c.pa.size), nil)
c.traceMsg(b)
}
// Process like a normal inbound msg.
c.processInboundClientMsg(b)
// See if we are doing graceful shutdown.
if !pm.last {
c.flushClients(0) // Never spend time in place.
} else {
// For the Shutdown event, we need to send in place otherwise
// there is a chance that the process will exit before the
// writeLoop has a chance to send it.
c.flushClients(time.Second)
return
}
case <-resetCh:
goto RESET
case <-s.quitCh:
return
}
}
}
// Will send a shutdown message.
func (s *Server) sendShutdownEvent() {
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return
}
subj := fmt.Sprintf(shutdownEventSubj, s.info.ID)
sendq := s.sys.sendq
// Stop any more messages from queueing up.
s.sys.sendq = nil
// Unhook all msgHandlers. Normal client cleanup will deal with subs, etc.
s.sys.replies = nil
s.mu.Unlock()
// Send to the internal queue and mark as last.
si := &ServerInfo{}
sendq <- &pubMsg{nil, subj, _EMPTY_, si, si, true}
}
// Used to send an internal message to an arbitrary account.
func (s *Server) sendInternalAccountMsg(a *Account, subject string, msg interface{}) error {
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return ErrNoSysAccount
}
sendq := s.sys.sendq
// Don't hold lock while placing on the channel.
c := s.sys.client
s.mu.Unlock()
// Replace our client with the account's internal client.
if a != nil {
a.mu.Lock()
c = a.internalClient()
a.mu.Unlock()
}
sendq <- &pubMsg{c, subject, _EMPTY_, nil, msg, false}
return nil
}
// This will queue up a message to be sent.
// Lock should not be held.
func (s *Server) sendInternalMsgLocked(sub, rply string, si *ServerInfo, msg interface{}) {
s.mu.Lock()
s.sendInternalMsg(sub, rply, si, msg)
s.mu.Unlock()
}
// This will queue up a message to be sent.
// Assumes lock is held on entry.
func (s *Server) sendInternalMsg(sub, rply string, si *ServerInfo, msg interface{}) {
if s.sys == nil || s.sys.sendq == nil {
return
}
sendq := s.sys.sendq
// Don't hold lock while placing on the channel.
s.mu.Unlock()
sendq <- &pubMsg{nil, sub, rply, si, msg, false}
s.mu.Lock()
}
// Used to send internal messages from other system clients to avoid no echo issues.
func (c *client) sendInternalMsg(sub, rply string, si *ServerInfo, msg interface{}) {
if c == nil {
return
}
s := c.srv
if s == nil {
return
}
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
// Unlock before the early return so the server lock is not leaked.
s.mu.Unlock()
return
}
sendq := s.sys.sendq
// Don't hold lock while placing on the channel.
s.mu.Unlock()
sendq <- &pubMsg{c, sub, rply, si, msg, false}
}
// Locked version of checking if events system running. Also checks server.
func (s *Server) eventsRunning() bool {
if s == nil {
return false
}
s.mu.Lock()
er := s.running && s.eventsEnabled()
s.mu.Unlock()
return er
}
// EventsEnabled will report if the server has internal events enabled via
// a defined system account.
func (s *Server) EventsEnabled() bool {
s.mu.Lock()
ee := s.eventsEnabled()
s.mu.Unlock()
return ee
}
// eventsEnabled will report if events are enabled.
// Lock should be held.
func (s *Server) eventsEnabled() bool {
return s.sys != nil && s.sys.client != nil && s.sys.account != nil
}
// TrackedRemoteServers returns how many remote servers we are tracking
// from a system events perspective.
func (s *Server) TrackedRemoteServers() int {
s.mu.Lock()
if !s.running || !s.eventsEnabled() {
// Unlock before the early return so the server lock is not leaked.
s.mu.Unlock()
return -1
}
ns := len(s.sys.servers)
s.mu.Unlock()
return ns
}
// Check for orphan servers who may have gone away without notification.
// This should be wrapChk() to setup common locking.
func (s *Server) checkRemoteServers() {
now := time.Now()
for sid, su := range s.sys.servers {
if now.Sub(su.ltime) > s.sys.orphMax {
s.Debugf("Detected orphan remote server: %q", sid)
// Simulate it going away.
s.processRemoteServerShutdown(sid)
}
}
if s.sys.sweeper != nil {
s.sys.sweeper.Reset(s.sys.chkOrph)
}
}
// Grab RSS and PCPU
// Server lock will be held but released.
func (s *Server) updateServerUsage(v *ServerStats) {
s.mu.Unlock()
defer s.mu.Lock()
var vss int64
pse.ProcUsage(&v.CPU, &v.Mem, &vss)
v.Cores = numCores
}
// Generate a route stat for our statz update.
func routeStat(r *client) *RouteStat {
if r == nil {
return nil
}
r.mu.Lock()
rs := &RouteStat{
ID: r.cid,
Sent: DataStats{
Msgs: atomic.LoadInt64(&r.outMsgs),
Bytes: atomic.LoadInt64(&r.outBytes),
},
Received: DataStats{
Msgs: atomic.LoadInt64(&r.inMsgs),
Bytes: atomic.LoadInt64(&r.inBytes),
},
Pending: int(r.out.pb),
}
if r.route != nil {
rs.Name = r.route.remoteName
}
r.mu.Unlock()
return rs
}
// Actual send method for statz updates.
// Lock should be held.
func (s *Server) sendStatsz(subj string) {
var m ServerStatsMsg
s.updateServerUsage(&m.Stats)
m.Stats.Start = s.start
m.Stats.Connections = len(s.clients)
m.Stats.TotalConnections = s.totalClients
m.Stats.ActiveAccounts = int(atomic.LoadInt32(&s.activeAccounts))
m.Stats.Received.Msgs = atomic.LoadInt64(&s.inMsgs)
m.Stats.Received.Bytes = atomic.LoadInt64(&s.inBytes)
m.Stats.Sent.Msgs = atomic.LoadInt64(&s.outMsgs)
m.Stats.Sent.Bytes = atomic.LoadInt64(&s.outBytes)
m.Stats.SlowConsumers = atomic.LoadInt64(&s.slowConsumers)
m.Stats.NumSubs = s.numSubscriptions()
// Routes
for _, r := range s.routes {
m.Stats.Routes = append(m.Stats.Routes, routeStat(r))
}
// Gateways
if s.gateway.enabled {
gw := s.gateway
gw.RLock()
for name, c := range gw.out {
gs := &GatewayStat{Name: name}
c.mu.Lock()
gs.ID = c.cid
gs.Sent = DataStats{
Msgs: atomic.LoadInt64(&c.outMsgs),
Bytes: atomic.LoadInt64(&c.outBytes),
}
c.mu.Unlock()
// Gather matching inbound connections
gs.Received = DataStats{}
for _, c := range gw.in {
c.mu.Lock()
if c.gw.name == name {
gs.Received.Msgs += atomic.LoadInt64(&c.inMsgs)
gs.Received.Bytes += atomic.LoadInt64(&c.inBytes)
gs.NumInbound++
}
c.mu.Unlock()
}
m.Stats.Gateways = append(m.Stats.Gateways, gs)
}
gw.RUnlock()
}
// Active Servers
m.Stats.ActiveServers = 1
if s.sys != nil {
m.Stats.ActiveServers += len(s.sys.servers)
}
// JetStream
if js := s.js; js != nil {
jStat := &JetStreamVarz{}
s.mu.Unlock()
js.mu.RLock()
c := js.config
c.StoreDir = _EMPTY_
jStat.Config = &c
js.mu.RUnlock()
jStat.Stats = js.usageStats()
if mg := js.getMetaGroup(); mg != nil {
if mg.Leader() {
jStat.Meta = s.raftNodeToClusterInfo(mg)
} else {
// non leader only include a shortened version without peers
jStat.Meta = &ClusterInfo{
Name: s.ClusterName(),
Leader: s.serverNameForNode(mg.GroupLeader()),
}
}
}
m.Stats.JetStream = jStat
s.mu.Lock()
}
// Send message.
s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
}
// Send out our statz update.
// This should be wrapChk() to setup common locking.
func (s *Server) heartbeatStatsz() {
if s.sys.stmr != nil {
// Increase after startup to our max.
s.sys.cstatsz *= 4
if s.sys.cstatsz > s.sys.statsz {
s.sys.cstatsz = s.sys.statsz
}
s.sys.stmr.Reset(s.sys.cstatsz)
}
s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID))
}
func (s *Server) sendStatszUpdate() {
s.mu.Lock()
s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID))
s.mu.Unlock()
}
// This should be wrapChk() to setup common locking.
func (s *Server) startStatszTimer() {
// We will start by sending out more of these and trail off to the statsz being the max.
s.sys.cstatsz = 250 * time.Millisecond
// Send out the first one after 250ms.
s.sys.stmr = time.AfterFunc(s.sys.cstatsz, s.wrapChk(s.heartbeatStatsz))
}
// Start a ticker that will fire periodically and check for orphaned servers.
// This should be wrapChk() to setup common locking.
func (s *Server) startRemoteServerSweepTimer() {
s.sys.sweeper = time.AfterFunc(s.sys.chkOrph, s.wrapChk(s.checkRemoteServers))
}
// Length of our system hash used for server targeted messages.
const sysHashLen = 8
// Computes a hash of 8 characters for the name.
func getHash(name string) []byte {
return getHashSize(name, sysHashLen)
}
// Returns the node name for this server which is a hash of the server name.
func (s *Server) Node() string {
s.mu.Lock()
defer s.mu.Unlock()
if s.sys != nil {
return s.sys.shash
}
return _EMPTY_
}
// This will setup our system wide tracking subs.
// For now we will setup one wildcard subscription to
// monitor all accounts for changes in number of connections.
// We can make this on a per account tracking basis if needed.
// Tradeoff is subscription and interest graph events vs connect and
// disconnect events, etc.
func (s *Server) initEventTracking() {
if !s.EventsEnabled() {
return
}
// Create a system hash which we use for other servers to target us specifically.
s.sys.shash = string(getHash(s.info.Name))
// This will be for all inbox responses.
subject := fmt.Sprintf(inboxRespSubj, s.sys.shash, "*")
if _, err := s.sysSubscribe(subject, s.inboxReply); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
s.sys.inboxPre = subject
// This is for remote updates for connection accounting.
subject = fmt.Sprintf(accConnsEventSubjOld, "*")
if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
s.Errorf("Error setting up internal tracking for %s: %v", subject, err)
}
// This will be for responses for account info that we send out.
subject = fmt.Sprintf(connsRespSubj, s.info.ID)
if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for broad requests to respond with number of subscriptions for a given subject.
if _, err := s.sysSubscribe(accNumSubsReqSubj, s.nsubsRequest); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for statsz from others.
subject = fmt.Sprintf(serverStatsSubj, "*")
if _, err := s.sysSubscribe(subject, s.remoteServerUpdate); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for all server shutdowns.
subject = fmt.Sprintf(shutdownEventSubj, "*")
if _, err := s.sysSubscribe(subject, s.remoteServerShutdown); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for account claims updates.
subscribeToUpdate := true
if s.accResolver != nil {
subscribeToUpdate = !s.accResolver.IsTrackingUpdate()
}
if subscribeToUpdate {
for _, sub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} {
if _, err := s.sysSubscribe(fmt.Sprintf(sub, "*"), s.accountClaimUpdate); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
}
}
// Listen for ping messages that will be sent to all servers for statsz.
// This subscription is kept for backwards compatibility. It was replaced by the ...PING.STATSZ subject set up below
if _, err := s.sysSubscribe(serverStatsPingReqSubj, s.statszReq); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
monSrvc := map[string]msgHandler{
"STATSZ": s.statszReq,
"VARZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &VarzEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Varz(&optz.VarzOptions) })
},
"SUBSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &SubszEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Subsz(&optz.SubszOptions) })
},
"CONNZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &ConnzEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Connz(&optz.ConnzOptions) })
},
"ROUTEZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &RoutezEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Routez(&optz.RoutezOptions) })
},
"GATEWAYZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &GatewayzEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Gatewayz(&optz.GatewayzOptions) })
},
"LEAFZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &LeafzEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Leafz(&optz.LeafzOptions) })
},
"ACCOUNTZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &AccountzEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Accountz(&optz.AccountzOptions) })
},
"JSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &JszEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Jsz(&optz.JSzOptions) })
},
}
for name, req := range monSrvc {
subject = fmt.Sprintf(serverDirectReqSubj, s.info.ID, name)
if _, err := s.sysSubscribe(subject, req); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
subject = fmt.Sprintf(serverPingReqSubj, name)
if _, err := s.sysSubscribe(subject, req); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
}
extractAccount := func(subject string) (string, error) {
if tk := strings.Split(subject, tsep); len(tk) != accReqTokens {
return "", fmt.Errorf("subject %q is malformed", subject)
} else {
return tk[accReqAccIndex], nil
}
}
monAccSrvc := map[string]msgHandler{
"SUBSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &SubszEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
optz.SubszOptions.Subscriptions = true
optz.SubszOptions.Account = acc
return s.Subsz(&optz.SubszOptions)
}
})
},
"CONNZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &ConnzEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
optz.ConnzOptions.Account = acc
return s.Connz(&optz.ConnzOptions)
}
})
},
"LEAFZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &LeafzEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
optz.LeafzOptions.Account = acc
return s.Leafz(&optz.LeafzOptions)
}
})
},
"JSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &JszEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
optz.Account = acc
return s.JszAccount(&optz.JSzOptions)
}
})
},
"INFO": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
optz := &AccInfoEventOptions{}
s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
return s.accountInfo(acc)
}
})
},
"CONNS": s.connsRequest,
}
for name, req := range monAccSrvc {
if _, err := s.sysSubscribe(fmt.Sprintf(accReqSubj, "*", name), req); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
}
// Listen for updates when leaf nodes connect for a given account. This will
// force any gateway connections to move to `modeInterestOnly`
subject = fmt.Sprintf(leafNodeConnectEventSubj, "*")
if _, err := s.sysSubscribe(subject, s.leafNodeConnected); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// For tracking remote latency measurements.
subject = fmt.Sprintf(remoteLatencyEventSubj, s.sys.shash)
if _, err := s.sysSubscribe(subject, s.remoteLatencyUpdate); err != nil {
s.Errorf("Error setting up internal latency tracking: %v", err)
}
// This is for simple debugging of number of subscribers that exist in the system.
if _, err := s.sysSubscribeInternal(accSubsSubj, s.debugSubscribers); err != nil {
s.Errorf("Error setting up internal debug service for subscribers: %v", err)
}
}
// add all exports a system account will need
func (s *Server) addSystemAccountExports(sacc *Account) {
if !s.EventsEnabled() {
return
}
if err := sacc.AddServiceExport(accSubsSubj, nil); err != nil {
s.Errorf("Error adding system service export for %q: %v", accSubsSubj, err)
}
if s.JetStreamEnabled() {
s.checkJetStreamExports()
}
}
// accountClaimUpdate will receive claim updates for accounts.
func (s *Server) accountClaimUpdate(sub *subscription, _ *client, _ *Account, subject, resp string, msg []byte) {
if !s.EventsEnabled() {
return
}
var pubKey string
toks := strings.Split(subject, tsep)
if len(toks) == accUpdateTokensNew {
pubKey = toks[accReqAccIndex]
} else if len(toks) == accUpdateTokensOld {
pubKey = toks[accUpdateAccIdxOld]
} else {
s.Debugf("Received account claims update on bad subject %q", subject)
return
}
if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil {
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else if claim.Subject != pubKey {
err := errors.New("subject does not match jwt content")
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else if v, ok := s.accounts.Load(pubKey); !ok {
respondToUpdate(s, resp, pubKey, "jwt update skipped", nil)
} else if err := s.updateAccountWithClaimJWT(v.(*Account), string(msg)); err != nil {
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else {
respondToUpdate(s, resp, pubKey, "jwt updated", nil)
}
}
// processRemoteServerShutdown will update any affected accounts.
// Will update the remote count for clients.
// Lock assume held.
func (s *Server) processRemoteServerShutdown(sid string) {
s.accounts.Range(func(k, v interface{}) bool {
v.(*Account).removeRemoteServer(sid)
return true
})
// Update any state in nodeInfo.
s.nodeToInfo.Range(func(k, v interface{}) bool {
si := v.(nodeInfo)
if si.id == sid {
si.offline = true
s.nodeToInfo.Store(k, si)
return false
}
return true
})
delete(s.sys.servers, sid)
}
func (s *Server) sameDomain(domain string) bool {
return domain == _EMPTY_ || s.info.Domain == _EMPTY_ || domain == s.info.Domain
}
// remoteServerShutdownEvent is called when we get an event from another server shutting down.
func (s *Server) remoteServerShutdown(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.eventsEnabled() {
return
}
toks := strings.Split(subject, tsep)
if len(toks) < shutdownEventTokens {
s.Debugf("Received remote server shutdown on bad subject %q", subject)
return
}
if len(msg) == 0 {
s.Errorf("Remote server sent invalid (empty) shutdown message to %q", subject)
return
}
// We have an optional serverInfo here, remove from nodeToX lookups.
var si ServerInfo
if err := json.Unmarshal(msg, &si); err != nil {
s.Debugf("Received bad server info for remote server shutdown")
return
}
// Additional processing here.
if !s.sameDomain(si.Domain) {
return
}
node := string(getHash(si.Name))
s.nodeToInfo.Store(node, nodeInfo{si.Name, si.Cluster, si.Domain, si.ID, true, true})
sid := toks[serverSubjectIndex]
if su := s.sys.servers[sid]; su != nil {
s.processRemoteServerShutdown(sid)
}
}
// remoteServerUpdate listens for statsz updates from other servers.
func (s *Server) remoteServerUpdate(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) {
var ssm ServerStatsMsg
if err := json.Unmarshal(msg, &ssm); err != nil {
s.Debugf("Received bad server info for remote server update")
return
}
si := ssm.Server
if !s.sameDomain(si.Domain) {
return
}
node := string(getHash(si.Name))
s.nodeToInfo.Store(node, nodeInfo{si.Name, si.Cluster, si.Domain, si.ID, false, si.JetStream})
}
// updateRemoteServer is called when we have an update from a remote server.
// This allows us to track remote servers, respond to shutdown messages properly,
// make sure that messages are ordered, and allow us to prune dead servers.
// Lock should be held upon entry.
func (s *Server) updateRemoteServer(ms *ServerInfo) {
su := s.sys.servers[ms.ID]
if su == nil {
s.sys.servers[ms.ID] = &serverUpdate{ms.Seq, time.Now()}
s.processNewServer(ms)
} else {
// Should always be going up.
if ms.Seq <= su.seq {
s.Errorf("Received out of order remote server update from: %q", ms.ID)
return
}
su.seq = ms.Seq
su.ltime = time.Now()
}
}
// processNewServer will hold any logic we want to use when we discover a new server.
// Lock should be held upon entry.
func (s *Server) processNewServer(ms *ServerInfo) {
// Right now we only check if we have leafnode servers and if so send another
// connect update to make sure they switch this account to interest only mode.
s.ensureGWsInterestOnlyForLeafNodes()
// Add to our nodeToName
if s.sameDomain(ms.Domain) {
node := string(getHash(ms.Name))
s.nodeToInfo.Store(node, nodeInfo{ms.Name, ms.Cluster, ms.Domain, ms.ID, false, ms.JetStream})
}
// Announce ourselves..
s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID))
}
// If GW is enabled on this server and there are any leaf node connections,
// this function will send a LeafNode connect system event to the super cluster
// to ensure that the GWs are in interest-only mode for this account.
// Lock should be held upon entry.
// TODO(dlc) - this will cause this account to be loaded on all servers. Need a better
// way with GW2.
func (s *Server) ensureGWsInterestOnlyForLeafNodes() {
if !s.gateway.enabled || len(s.leafs) == 0 {
return
}
sent := make(map[*Account]bool, len(s.leafs))
for _, c := range s.leafs {
if !sent[c.acc] {
s.sendLeafNodeConnectMsg(c.acc.Name)
sent[c.acc] = true
}
}
}
// shutdownEventing will clean up all eventing state.
func (s *Server) shutdownEventing() {
if !s.eventsRunning() {
return
}
s.mu.Lock()
clearTimer(&s.sys.sweeper)
clearTimer(&s.sys.stmr)
sys := s.sys
s.mu.Unlock()
// We will queue up a shutdown event and wait for the
// internal send loop to exit.
s.sendShutdownEvent()
sys.wg.Wait()
close(sys.resetCh)
s.mu.Lock()
defer s.mu.Unlock()
// Whip through all accounts.
s.accounts.Range(func(k, v interface{}) bool {
v.(*Account).clearEventing()
return true
})
// Turn everything off here.
s.sys = nil
}
// Request for our local connection count.
func (s *Server) connsRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) {
if !s.eventsRunning() {
return
}
tk := strings.Split(subject, tsep)
if len(tk) != accReqTokens {
s.sys.client.Errorf("Bad subject account connections request message")
return
}
a := tk[accReqAccIndex]
m := accNumConnsReq{Account: a}
if _, msg := c.msgParts(rmsg); len(msg) > 0 {
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err)
return
}
}
if m.Account != a {
s.sys.client.Errorf("Error unmarshalled account does not match subject")
return
}
// Here we really only want to look up the account if it's local. We do not want to fetch this
// account if we have no interest in it.
var acc *Account
if v, ok := s.accounts.Load(m.Account); ok {
acc = v.(*Account)
}
if acc == nil {
return
}
// We know this is a local connection.
if nlc := acc.NumLocalConnections(); nlc > 0 {
s.mu.Lock()
s.sendAccConnsUpdate(acc, reply)
s.mu.Unlock()
}
}
// leafNodeConnected is an event we will receive when a leaf node for a given account connects.
func (s *Server) leafNodeConnected(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) {
m := accNumConnsReq{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err)
return
}
s.mu.Lock()
na := m.Account == _EMPTY_ || !s.eventsEnabled() || !s.gateway.enabled
s.mu.Unlock()
if na {
return
}
if acc, _ := s.lookupAccount(m.Account); acc != nil {
s.switchAccountToInterestMode(acc.Name)
}
}
// Common filter options for system requests STATSZ VARZ SUBSZ CONNZ ROUTEZ GATEWAYZ LEAFZ
type EventFilterOptions struct {
Name string `json:"server_name,omitempty"` // filter by server name
Cluster string `json:"cluster,omitempty"` // filter by cluster name
Host string `json:"host,omitempty"` // filter by host name
Tags []string `json:"tags,omitempty"` // filter by tags (must match all tags)
Domain string `json:"domain,omitempty"` // filter by JS domain
}
// StatszEventOptions are options passed to Statsz
type StatszEventOptions struct {
// No actual options yet
EventFilterOptions
}
// Options for account Info
type AccInfoEventOptions struct {
// No actual options yet
EventFilterOptions
}
// In the context of system events, ConnzEventOptions are options passed to Connz
type ConnzEventOptions struct {
ConnzOptions
EventFilterOptions
}
// In the context of system events, RoutezEventOptions are options passed to Routez
type RoutezEventOptions struct {
RoutezOptions
EventFilterOptions
}
// In the context of system events, SubzEventOptions are options passed to Subz
type SubszEventOptions struct {
SubszOptions
EventFilterOptions
}
// In the context of system events, VarzEventOptions are options passed to Varz
type VarzEventOptions struct {
VarzOptions
EventFilterOptions
}
// In the context of system events, GatewayzEventOptions are options passed to Gatewayz
type GatewayzEventOptions struct {
GatewayzOptions
EventFilterOptions
}
// In the context of system events, LeafzEventOptions are options passed to Leafz
type LeafzEventOptions struct {
LeafzOptions
EventFilterOptions
}
// In the context of system events, AccountzEventOptions are options passed to Accountz
type AccountzEventOptions struct {
AccountzOptions
EventFilterOptions
}
// In the context of system events, JszEventOptions are options passed to Jsz
type JszEventOptions struct {
JSzOptions
EventFilterOptions
}
// returns true if the request does NOT apply to this server and can be ignored.
// DO NOT hold the server lock when calling this.
func (s *Server) filterRequest(fOpts *EventFilterOptions) bool {
if fOpts.Name != _EMPTY_ && !strings.Contains(s.info.Name, fOpts.Name) {
return true
}
if fOpts.Host != _EMPTY_ && !strings.Contains(s.info.Host, fOpts.Host) {
return true
}
if fOpts.Cluster != _EMPTY_ {
s.mu.Lock()
cluster := s.info.Cluster
s.mu.Unlock()
if !strings.Contains(cluster, fOpts.Cluster) {
return true
}
}
if len(fOpts.Tags) > 0 {
opts := s.getOpts()
for _, t := range fOpts.Tags {
if !opts.Tags.Contains(t) {
return true
}
}
}
if fOpts.Domain != _EMPTY_ && s.getOpts().JetStreamDomain != fOpts.Domain {
return true
}
return false
}
// statszReq is a request for us to respond with current statsz.
func (s *Server) statszReq(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) {
if !s.EventsEnabled() || reply == _EMPTY_ {
return
}
opts := StatszEventOptions{}
if len(msg) != 0 {
if err := json.Unmarshal(msg, &opts); err != nil {
server := &ServerInfo{}
response := map[string]interface{}{"server": server}
response["error"] = map[string]interface{}{
"code": http.StatusBadRequest,
"description": err.Error(),
}
s.sendInternalMsgLocked(reply, _EMPTY_, server, response)
return
} else if ignore := s.filterRequest(&opts.EventFilterOptions); ignore {
return
}
}
s.mu.Lock()
s.sendStatsz(reply)
s.mu.Unlock()
}
var errSkipZreq = errors.New("filtered response")
func (s *Server) zReq(c *client, reply string, rmsg []byte, fOpts *EventFilterOptions, optz interface{}, respf func() (interface{}, error)) {
if !s.EventsEnabled() || reply == _EMPTY_ {
return
}
server := &ServerInfo{}
response := map[string]interface{}{"server": server}
var err error
status := 0
_, msg := c.msgParts(rmsg)
if len(msg) != 0 {
if err = json.Unmarshal(msg, optz); err != nil {
status = http.StatusBadRequest // status is only included on error, so record how far execution got
} else if s.filterRequest(fOpts) {
return
}
}
if err == nil {
response["data"], err = respf()
if errors.Is(err, errSkipZreq) {
return
}
status = http.StatusInternalServerError
}
if err != nil {
response["error"] = map[string]interface{}{
"code": status,
"description": err.Error(),
}
}
s.sendInternalMsgLocked(reply, _EMPTY_, server, response)
}
// remoteConnsUpdate gets called when we receive a remote update from another server.
func (s *Server) remoteConnsUpdate(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
m := AccountNumConns{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connection event message: %v", err)
return
}
// See if we have the account registered, if not drop it.
// Make sure this does not force us to load this account here.
var acc *Account
if v, ok := s.accounts.Load(m.Account); ok {
acc = v.(*Account)
}
// Silently ignore these if we do not have local interest in the account.
if acc == nil {
return
}
s.mu.Lock()
// check again here if we have been shutdown.
if !s.running || !s.eventsEnabled() {
s.mu.Unlock()
return
}
// Double check that this is not us, should never happen, so error if it does.
if m.Server.ID == s.info.ID {
s.sys.client.Errorf("Processing our own account connection event message: ignored")
s.mu.Unlock()
return
}
// If we are here we have interest in tracking this account. Update our accounting.
clients := acc.updateRemoteServer(&m)
s.updateRemoteServer(&m.Server)
s.mu.Unlock()
// Need to close clients outside of server lock
for _, c := range clients {
c.maxAccountConnExceeded()
}
}
// Setup tracking for this account. This allows us to track global account activity.
// Lock should be held on entry.
func (s *Server) enableAccountTracking(a *Account) {
if a == nil || !s.eventsEnabled() {
return
}
// TODO(ik): Generate payload although message may not be sent.
// May need to ensure we do so only if there is a known interest.
// This can get complicated with gateways.
subj := fmt.Sprintf(accReqSubj, a.Name, "CONNS")
reply := fmt.Sprintf(connsRespSubj, s.info.ID)
m := accNumConnsReq{Account: a.Name}
s.sendInternalMsg(subj, reply, &m.Server, &m)
}
// Event on leaf node connect.
// Lock should NOT be held on entry.
func (s *Server) sendLeafNodeConnect(a *Account) {
s.mu.Lock()
// If we are not in operator mode, or do not have any gateways defined, this should also be a no-op.
if a == nil || !s.eventsEnabled() || !s.gateway.enabled {
s.mu.Unlock()
return
}
s.sendLeafNodeConnectMsg(a.Name)
s.mu.Unlock()
s.switchAccountToInterestMode(a.Name)
}
// Send the leafnode connect message.
// Lock should be held.
func (s *Server) sendLeafNodeConnectMsg(accName string) {
subj := fmt.Sprintf(leafNodeConnectEventSubj, accName)
m := accNumConnsReq{Account: accName}
s.sendInternalMsg(subj, "", &m.Server, &m)
}
// sendAccConnsUpdate is called to send out our information on the
// account's local connections.
// Lock should be held on entry.
func (s *Server) sendAccConnsUpdate(a *Account, subj ...string) {
if !s.eventsEnabled() || a == nil {
return
}
sendQ := s.sys.sendq
if sendQ == nil {
return
}
// Build event with account name and number of local clients and leafnodes.
eid := s.nextEventID()
a.mu.Lock()
s.mu.Unlock()
localConns := a.numLocalConnections()
m := &AccountNumConns{
TypedEvent: TypedEvent{
Type: AccountNumConnsMsgType,
ID: eid,
Time: time.Now().UTC(),
},
Account: a.Name,
Conns: localConns,
LeafNodes: a.numLocalLeafNodes(),
TotalConns: localConns + a.numLocalLeafNodes(),
}
// Set timer to fire again unless we are at zero.
if localConns == 0 {
clearTimer(&a.ctmr)
} else {
// Check to see if we have an HB running and update.
if a.ctmr == nil {
a.ctmr = time.AfterFunc(eventsHBInterval, func() { s.accConnsUpdate(a) })
} else {
a.ctmr.Reset(eventsHBInterval)
}
}
for _, sub := range subj {
msg := &pubMsg{nil, sub, _EMPTY_, &m.Server, &m, false}
select {
case sendQ <- msg:
default:
a.mu.Unlock()
sendQ <- msg
a.mu.Lock()
}
}
a.mu.Unlock()
s.mu.Lock()
}
// accConnsUpdate is called whenever there is a change to the account's
// number of active connections, or during a heartbeat.
func (s *Server) accConnsUpdate(a *Account) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.eventsEnabled() || a == nil {
return
}
s.sendAccConnsUpdate(a, fmt.Sprintf(accConnsEventSubjOld, a.Name), fmt.Sprintf(accConnsEventSubjNew, a.Name))
}
// server lock should be held
func (s *Server) nextEventID() string {
return s.eventIds.Next()
}
// accountConnectEvent will send an account client connect event if there is interest.
// This is a billing event.
func (s *Server) accountConnectEvent(c *client) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
gacc := s.gacc
eid := s.nextEventID()
s.mu.Unlock()
c.mu.Lock()
// Ignore global account activity
if c.acc == nil || c.acc == gacc {
c.mu.Unlock()
return
}
m := ConnectEventMsg{
TypedEvent: TypedEvent{
Type: ConnectEventMsgType,
ID: eid,
Time: time.Now().UTC(),
},
Client: ClientInfo{
Start: &c.start,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: c.getRawAuthUser(),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
Jwt: c.opts.JWT,
IssuerKey: issuerForClient(c),
Tags: c.tags,
NameTag: c.nameTag,
Kind: c.kindString(),
ClientType: c.clientTypeString(),
},
}
c.mu.Unlock()
subj := fmt.Sprintf(connectEventSubj, c.acc.Name)
s.sendInternalMsgLocked(subj, _EMPTY_, &m.Server, &m)
}
// accountDisconnectEvent will send an account client disconnect event if there is interest.
// This is a billing event.
func (s *Server) accountDisconnectEvent(c *client, now time.Time, reason string) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
gacc := s.gacc
eid := s.nextEventID()
s.mu.Unlock()
c.mu.Lock()
// Ignore global account activity
if c.acc == nil || c.acc == gacc {
c.mu.Unlock()
return
}
m := DisconnectEventMsg{
TypedEvent: TypedEvent{
Type: DisconnectEventMsgType,
ID: eid,
Time: now,
},
Client: ClientInfo{
Start: &c.start,
Stop: &now,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: c.getRawAuthUser(),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
RTT: c.getRTT(),
Jwt: c.opts.JWT,
IssuerKey: issuerForClient(c),
Tags: c.tags,
NameTag: c.nameTag,
Kind: c.kindString(),
ClientType: c.clientTypeString(),
},
Sent: DataStats{
Msgs: atomic.LoadInt64(&c.inMsgs),
Bytes: atomic.LoadInt64(&c.inBytes),
},
Received: DataStats{
Msgs: c.outMsgs,
Bytes: c.outBytes,
},
Reason: reason,
}
accName := c.acc.Name
c.mu.Unlock()
subj := fmt.Sprintf(disconnectEventSubj, accName)
s.sendInternalMsgLocked(subj, _EMPTY_, &m.Server, &m)
}
func (s *Server) sendAuthErrorEvent(c *client) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
eid := s.nextEventID()
s.mu.Unlock()
now := time.Now().UTC()
c.mu.Lock()
m := DisconnectEventMsg{
TypedEvent: TypedEvent{
Type: DisconnectEventMsgType,
ID: eid,
Time: now,
},
Client: ClientInfo{
Start: &c.start,
Stop: &now,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: c.getRawAuthUser(),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
RTT: c.getRTT(),
Jwt: c.opts.JWT,
IssuerKey: issuerForClient(c),
Tags: c.tags,
NameTag: c.nameTag,
Kind: c.kindString(),
ClientType: c.clientTypeString(),
},
Sent: DataStats{
Msgs: c.inMsgs,
Bytes: c.inBytes,
},
Received: DataStats{
Msgs: c.outMsgs,
Bytes: c.outBytes,
},
Reason: AuthenticationViolation.String(),
}
c.mu.Unlock()
s.mu.Lock()
subj := fmt.Sprintf(authErrorEventSubj, s.info.ID)
s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
s.mu.Unlock()
}
// Internal message callback.
// If the msg is needed past the callback it is required to be copied.
// rmsg contains header and the message. use client.msgParts(rmsg) to split them apart
type msgHandler func(sub *subscription, client *client, acc *Account, subject, reply string, rmsg []byte)
// Create an internal subscription. sysSubscribeQ for queue groups.
func (s *Server) sysSubscribe(subject string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, _EMPTY_, false, nil, cb)
}
// Create an internal subscription with queue
func (s *Server) sysSubscribeQ(subject, queue string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, queue, false, nil, cb)
}
// Create an internal subscription but do not forward interest.
func (s *Server) sysSubscribeInternal(subject string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, _EMPTY_, true, nil, cb)
}
func (s *Server) systemSubscribe(subject, queue string, internalOnly bool, c *client, cb msgHandler) (*subscription, error) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return nil, ErrNoSysAccount
}
if cb == nil {
s.mu.Unlock()
return nil, fmt.Errorf("undefined message handler")
}
if c == nil {
c = s.sys.client
}
trace := c.trace
s.sys.sid++
sid := strconv.Itoa(s.sys.sid)
s.mu.Unlock()
// Now create the subscription
if trace {
c.traceInOp("SUB", []byte(subject+" "+queue+" "+sid))
}
var q []byte
if queue != "" {
q = []byte(queue)
}
// Now create the subscription
return c.processSub([]byte(subject), q, []byte(sid), cb, internalOnly)
}
func (s *Server) sysUnsubscribe(sub *subscription) {
if sub == nil {
return
}
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
c := sub.client
s.mu.Unlock()
if c != nil {
c.processUnsub(sub.sid)
}
}
// This will generate the tracking subject for remote latency from the response subject.
func remoteLatencySubjectForResponse(subject []byte) string {
if !isTrackedReply(subject) {
return ""
}
toks := bytes.Split(subject, []byte(tsep))
// FIXME(dlc) - Sprintf may become a performance concern at some point.
return fmt.Sprintf(remoteLatencyEventSubj, toks[len(toks)-2])
}
// remoteLatencyUpdate is used to track remote latency measurements for tracking on exported services.
func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) {
if !s.eventsRunning() {
return
}
rl := remoteLatency{}
if err := json.Unmarshal(msg, &rl); err != nil {
s.Errorf("Error unmarshalling remote latency measurement: %v", err)
return
}
// Now we need to look up the responseServiceImport associated with this measurement.
acc, err := s.LookupAccount(rl.Account)
if err != nil {
s.Warnf("Could not lookup account %q for latency measurement", rl.Account)
return
}
// Now get the request id / reply. We need to see if we have a GW prefix and if so strip that off.
reply := rl.ReqId
if gwPrefix, old := isGWRoutedSubjectAndIsOldPrefix([]byte(reply)); gwPrefix {
reply = string(getSubjectFromGWRoutedReply([]byte(reply), old))
}
acc.mu.RLock()
si := acc.exports.responses[reply]
if si == nil {
acc.mu.RUnlock()
return
}
m1 := si.m1
m2 := rl.M2
lsub := si.latency.subject
acc.mu.RUnlock()
// So we have not processed the response tracking measurement yet.
if m1 == nil {
si.acc.mu.Lock()
// Double check since could have slipped in.
m1 = si.m1
if m1 == nil {
// Store our value there for them to pick up.
si.m1 = &m2
}
si.acc.mu.Unlock()
if m1 == nil {
return
}
}
// Calculate the correct latencies given M1 and M2.
m1.merge(&m2)
// Clear the requesting client since we send the result here.
acc.mu.Lock()
si.rc = nil
acc.mu.Unlock()
// Make sure we remove the entry here.
acc.removeServiceImport(si.from)
// Send the metrics
s.sendInternalAccountMsg(acc, lsub, m1)
}
// This is used for all inbox replies so that we do not send supercluster wide interest
// updates for every request. Same trick used in modern NATS clients.
func (s *Server) inboxReply(sub *subscription, c *client, acc *Account, subject, reply string, msg []byte) {
s.mu.Lock()
if !s.eventsEnabled() || s.sys.replies == nil {
s.mu.Unlock()
return
}
cb, ok := s.sys.replies[subject]
s.mu.Unlock()
if ok && cb != nil {
cb(sub, c, acc, subject, reply, msg)
}
}
// Copied from go client.
// We could use serviceReply here instead to save some code.
// I prefer these semantics for the moment, when tracing you know what this is.
const (
InboxPrefix = "$SYS._INBOX."
inboxPrefixLen = len(InboxPrefix)
respInboxPrefixLen = inboxPrefixLen + sysHashLen + 1
replySuffixLen = 8 // Gives us 62^8
)
// Creates an internal inbox used for replies that will be processed by the global wc handler.
func (s *Server) newRespInbox() string {
var b [respInboxPrefixLen + replySuffixLen]byte
pres := b[:respInboxPrefixLen]
copy(pres, s.sys.inboxPre)
rn := rand.Int63()
for i, l := respInboxPrefixLen, rn; i < len(b); i++ {
b[i] = digits[l%base]
l /= base
}
return string(b[:])
}
// accNumSubsReq is sent when we need to gather remote info on subs.
type accNumSubsReq struct {
Account string `json:"acc"`
Subject string `json:"subject"`
Queue []byte `json:"queue,omitempty"`
}
// helper function to total information from results to count subs.
func totalSubs(rr *SublistResult, qg []byte) (nsubs int32) {
if rr == nil {
return
}
checkSub := func(sub *subscription) {
// TODO(dlc) - This could be smarter.
if qg != nil && !bytes.Equal(qg, sub.queue) {
return
}
if sub.client.kind == CLIENT || sub.client.isHubLeafNode() {
nsubs++
}
}
if qg == nil {
for _, sub := range rr.psubs {
checkSub(sub)
}
}
for _, qsub := range rr.qsubs {
for _, sub := range qsub {
checkSub(sub)
}
}
return
}
// Allows users of large systems to debug active subscribers for a given subject.
// Payload should be the subject of interest.
func (s *Server) debugSubscribers(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) {
// Even though this is an internal only subscription, meaning interest was not forwarded, we could
// get one here from a GW in optimistic mode. Ignore for now.
// FIXME(dlc) - Should we send no interest here back to the GW?
if c.kind != CLIENT {
return
}
_, acc, _, msg, err := s.getRequestInfo(c, rmsg)
if err != nil {
return
}
// We could have a single subject or we could have a subject and a wildcard separated by whitespace.
args := strings.Split(strings.TrimSpace(string(msg)), " ")
if len(args) == 0 {
s.sendInternalAccountMsg(acc, reply, 0)
return
}
tsubj := args[0]
var qgroup []byte
if len(args) > 1 {
qgroup = []byte(args[1])
}
var nsubs int32
if subjectIsLiteral(tsubj) {
// We will look up subscribers locally first then determine if we need to solicit other servers.
rr := acc.sl.Match(tsubj)
nsubs = totalSubs(rr, qgroup)
} else {
// We have a wildcard, so this is a bit slower path.
var _subs [32]*subscription
subs := _subs[:0]
acc.sl.All(&subs)
for _, sub := range subs {
if subjectIsSubsetMatch(string(sub.subject), tsubj) {
if qgroup != nil && !bytes.Equal(qgroup, sub.queue) {
continue
}
if sub.client.kind == CLIENT || sub.client.isHubLeafNode() {
nsubs++
}
}
}
}
// We should have an idea of how many responses to expect from remote servers.
var expected = acc.expectedRemoteResponses()
// If we are only local, go ahead and return.
if expected == 0 {
s.sendInternalAccountMsg(nil, reply, nsubs)
return
}
// We need to solicit from others.
// To track status.
responses := int32(0)
done := make(chan (bool))
s.mu.Lock()
// Create direct reply inbox that we multiplex under the WC replies.
replySubj := s.newRespInbox()
// Store our handler.
s.sys.replies[replySubj] = func(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) {
if n, err := strconv.Atoi(string(msg)); err == nil {
atomic.AddInt32(&nsubs, int32(n))
}
if atomic.AddInt32(&responses, 1) >= expected {
select {
case done <- true:
default:
}
}
}
// Send the request to the other servers.
request := &accNumSubsReq{
Account: acc.Name,
Subject: tsubj,
Queue: qgroup,
}
s.sendInternalMsg(accNumSubsReqSubj, replySubj, nil, request)
s.mu.Unlock()
// FIXME(dlc) - We should rate limit here instead of blind Go routine.
go func() {
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
// Cleanup the WC entry.
var sendResponse bool
s.mu.Lock()
if s.sys != nil && s.sys.replies != nil {
delete(s.sys.replies, replySubj)
sendResponse = true
}
s.mu.Unlock()
if sendResponse {
// Send the response.
s.sendInternalAccountMsg(nil, reply, atomic.LoadInt32(&nsubs))
}
}()
}
// Request for our local subscription count. This will come from a remote origin server
// that received the initial request.
func (s *Server) nsubsRequest(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
m := accNumSubsReq{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account nsubs request message: %v", err)
return
}
// Grab account.
acc, _ := s.lookupAccount(m.Account)
if acc == nil || acc.numLocalAndLeafConnections() == 0 {
return
}
// We will look up subscribers locally first then determine if we need to solicit other servers.
var nsubs int32
if subjectIsLiteral(m.Subject) {
rr := acc.sl.Match(m.Subject)
nsubs = totalSubs(rr, m.Queue)
} else {
// We have a wildcard, so this is a bit slower path.
var _subs [32]*subscription
subs := _subs[:0]
acc.sl.All(&subs)
for _, sub := range subs {
if (sub.client.kind == CLIENT || sub.client.isHubLeafNode()) && subjectIsSubsetMatch(string(sub.subject), m.Subject) {
if m.Queue != nil && !bytes.Equal(m.Queue, sub.queue) {
continue
}
nsubs++
}
}
}
s.sendInternalMsgLocked(reply, _EMPTY_, nil, nsubs)
}
// Helper to grab account name for a client.
func accForClient(c *client) string {
if c.acc != nil {
return c.acc.Name
}
return "N/A"
}
// Helper to grab issuer for a client.
func issuerForClient(c *client) (issuerKey string) {
if c == nil || c.user == nil {
return
}
issuerKey = c.user.SigningKey
if issuerKey == _EMPTY_ && c.user.Account != nil {
issuerKey = c.user.Account.Name
}
return
}
// Helper to clear timers.
func clearTimer(tp **time.Timer) {
if t := *tp; t != nil {
t.Stop()
*tp = nil
}
}
// Helper function to wrap functions with common test
// to lock server and return if events not enabled.
func (s *Server) wrapChk(f func()) func() {
return func() {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
f()
s.mu.Unlock()
}
}
| 1 | 13,848 | I am a bit confused about PING here. Ping is implied and it overlaps with "$SYS.REQ.ACCOUNT.%s.%s" where the last token is what is requested (CONNZ) and the one before is the requested account id. | nats-io-nats-server | go |
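To make the reviewer's concern concrete: the per-account request subjects in this file follow accReqSubj ("$SYS.REQ.ACCOUNT.%s.%s", five tokens, account id at accReqAccIndex = 3), so the literal subject added by the patch, "$SYS.REQ.ACCOUNT.PING.CONNZ", tokenizes exactly like a per-account CONNZ request with "PING" sitting in the account-id slot, and the wildcard subscription on $SYS.REQ.ACCOUNT.*.CONNZ would match it as well. The sketch below is only an editor's illustration of that token overlap, not code from the server; the account id it uses is made up.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Existing convention: $SYS.REQ.ACCOUNT.<account-id>.<request>, five tokens.
	perAccount := fmt.Sprintf("$SYS.REQ.ACCOUNT.%s.%s", "ACEXAMPLE123", "CONNZ")
	// Literal subject introduced by the patch.
	ping := "$SYS.REQ.ACCOUNT.PING.CONNZ"
	for _, subj := range []string{perAccount, ping} {
		toks := strings.Split(subj, ".")
		// toks[3] is normally the account id and toks[4] the requested endpoint;
		// for the PING form, "PING" lands in the account-id position.
		fmt.Printf("subject=%q account-token=%q request=%q\n", subj, toks[3], toks[4])
	}
}

Whether "PING" should be treated as a reserved pseudo-account under the existing prefix, or the new request should live under a distinct subject, is the design question the comment raises.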
@@ -708,3 +708,15 @@ func (v *volumeClient) CloudMigrateStatus() (*api.CloudMigrateStatusResponse, er
}
return statusResponse, nil
}
+
+// Catalog returns the file catalog of the specified volume id, optionally scoped to a subfolder and maximum depth (if provided)
+func (v *volumeClient) Catalog(id, subfolder, maxDepth string) (api.CatalogResponse, error) {
+ var catalog api.CatalogResponse
+
+ req := v.c.Get().Resource(volumePath + "/catalog").Instance(id)
+ if err := req.QueryOption(api.OptCatalogSubFolder, subfolder).QueryOption(api.OptCatalogMaxDepth, maxDepth).Do().Unmarshal(&catalog); err != nil {
+ return catalog, err
+ }
+
+ return catalog, nil
+} | 1 | package volume
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"strconv"
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/api/client"
"github.com/libopenstorage/openstorage/volume"
)
const (
graphPath = "/graph"
volumePath = "/osd-volumes"
snapPath = "/osd-snapshot"
)
type volumeClient struct {
volume.IODriver
c *client.Client
}
func newVolumeClient(c *client.Client) volume.VolumeDriver {
return &volumeClient{volume.IONotSupported, c}
}
// String description of this driver.
func (v *volumeClient) Name() string {
return "VolumeDriver"
}
func (v *volumeClient) Type() api.DriverType {
// Block drivers implement the superset.
return api.DriverType_DRIVER_TYPE_BLOCK
}
func (v *volumeClient) GraphDriverCreate(id string, parent string) error {
response := ""
if err := v.c.Put().Resource(graphPath + "/create").Instance(id).Do().Unmarshal(&response); err != nil {
return err
}
if response != id {
return fmt.Errorf("Invalid response: %s", response)
}
return nil
}
func (v *volumeClient) GraphDriverRemove(id string) error {
response := ""
if err := v.c.Put().Resource(graphPath + "/remove").Instance(id).Do().Unmarshal(&response); err != nil {
return err
}
if response != id {
return fmt.Errorf("Invalid response: %s", response)
}
return nil
}
func (v *volumeClient) GraphDriverGet(id string, mountLabel string) (string, error) {
response := ""
if err := v.c.Get().Resource(graphPath + "/inspect").Instance(id).Do().Unmarshal(&response); err != nil {
return "", err
}
return response, nil
}
func (v *volumeClient) GraphDriverRelease(id string) error {
response := ""
if err := v.c.Put().Resource(graphPath + "/release").Instance(id).Do().Unmarshal(&response); err != nil {
return err
}
if response != id {
return fmt.Errorf("Invalid response: %v", response)
}
return nil
}
func (v *volumeClient) GraphDriverExists(id string) bool {
response := false
v.c.Get().Resource(graphPath + "/exists").Instance(id).Do().Unmarshal(&response)
return response
}
func (v *volumeClient) GraphDriverDiff(id string, parent string) io.Writer {
body, _ := v.c.Get().Resource(graphPath + "/diff?id=" + id + "&parent=" + parent).Do().Body()
return bytes.NewBuffer(body)
}
func (v *volumeClient) GraphDriverChanges(id string, parent string) ([]api.GraphDriverChanges, error) {
var changes []api.GraphDriverChanges
err := v.c.Get().Resource(graphPath + "/changes").Instance(id).Do().Unmarshal(&changes)
return changes, err
}
func (v *volumeClient) GraphDriverApplyDiff(id string, parent string, diff io.Reader) (int, error) {
b, err := ioutil.ReadAll(diff)
if err != nil {
return 0, err
}
response := 0
if err = v.c.Put().Resource(graphPath + "/diff?id=" + id + "&parent=" + parent).Instance(id).Body(b).Do().Unmarshal(&response); err != nil {
return 0, err
}
return response, nil
}
func (v *volumeClient) GraphDriverDiffSize(id string, parent string) (int, error) {
size := 0
err := v.c.Get().Resource(graphPath + "/diffsize").Instance(id).Do().Unmarshal(&size)
return size, err
}
// Create a new Vol for the specific volume spec.
// It returns a system generated VolumeID that uniquely identifies the volume
func (v *volumeClient) Create(locator *api.VolumeLocator, source *api.Source,
spec *api.VolumeSpec) (string, error) {
response := &api.VolumeCreateResponse{}
request := &api.VolumeCreateRequest{
Locator: locator,
Source: source,
Spec: spec,
}
if err := v.c.Post().Resource(volumePath).Body(request).Do().Unmarshal(response); err != nil {
return "", err
}
if response.VolumeResponse != nil && response.VolumeResponse.Error != "" {
return "", errors.New(response.VolumeResponse.Error)
}
return response.Id, nil
}
// Status diagnostic information
func (v *volumeClient) Status() [][2]string {
return [][2]string{}
}
// Inspect specified volumes.
// Errors ErrEnoEnt may be returned.
func (v *volumeClient) Inspect(ids []string) ([]*api.Volume, error) {
if len(ids) == 0 {
return nil, nil
}
var volumes []*api.Volume
request := v.c.Get().Resource(volumePath)
for _, id := range ids {
request.QueryOption(api.OptVolumeID, id)
}
if err := request.Do().Unmarshal(&volumes); err != nil {
return nil, err
}
return volumes, nil
}
// Delete volume.
// Errors ErrEnoEnt, ErrVolHasSnaps may be returned.
func (v *volumeClient) Delete(volumeID string) error {
response := &api.VolumeResponse{}
if err := v.c.Delete().Resource(volumePath).Instance(volumeID).Do().Unmarshal(response); err != nil {
return err
}
if response.Error != "" {
return errors.New(response.Error)
}
return nil
}
// Snap specified volume. IO to the underlying volume should be quiesced before
// calling this function.
// Errors ErrEnoEnt may be returned
func (v *volumeClient) Snapshot(volumeID string, readonly bool,
locator *api.VolumeLocator) (string, error) {
response := &api.SnapCreateResponse{}
request := &api.SnapCreateRequest{
Id: volumeID,
Readonly: readonly,
Locator: locator,
}
if err := v.c.Post().Resource(snapPath).Body(request).Do().Unmarshal(response); err != nil {
return "", err
}
// TODO(pedge): this probably should not be embedded in this way
if response.VolumeCreateResponse != nil &&
response.VolumeCreateResponse.VolumeResponse != nil &&
response.VolumeCreateResponse.VolumeResponse.Error != "" {
return "", errors.New(
response.VolumeCreateResponse.VolumeResponse.Error)
}
if response.VolumeCreateResponse != nil {
return response.VolumeCreateResponse.Id, nil
}
return "", nil
}
// Restore specified volume to given snapshot state
func (v *volumeClient) Restore(volumeID string, snapID string) error {
response := &api.VolumeResponse{}
req := v.c.Post().Resource(snapPath + "/restore").Instance(volumeID)
req.QueryOption(api.OptSnapID, snapID)
if err := req.Do().Unmarshal(response); err != nil {
return err
}
if response.Error != "" {
return errors.New(response.Error)
}
return nil
}
// Stats for specified volume.
// Errors ErrEnoEnt may be returned
func (v *volumeClient) Stats(
volumeID string,
cumulative bool,
) (*api.Stats, error) {
stats := &api.Stats{}
req := v.c.Get().Resource(volumePath + "/stats").Instance(volumeID)
req.QueryOption(api.OptCumulative, strconv.FormatBool(cumulative))
err := req.Do().Unmarshal(stats)
return stats, err
}
// UsedSize returns allocated volume size.
// Errors ErrEnoEnt may be returned
func (v *volumeClient) UsedSize(
volumeID string,
) (uint64, error) {
var usedSize uint64
req := v.c.Get().Resource(volumePath + "/usedsize").Instance(volumeID)
err := req.Do().Unmarshal(&usedSize)
return usedSize, err
}
// Active requests on all volumes.
func (v *volumeClient) GetActiveRequests() (*api.ActiveRequests, error) {
requests := &api.ActiveRequests{}
resp := v.c.Get().Resource(volumePath + "/requests").Instance("vol_id").Do()
if resp.Error() != nil {
return nil, resp.FormatError()
}
if err := resp.Unmarshal(requests); err != nil {
return nil, err
}
return requests, nil
}
// Shutdown and cleanup.
func (v *volumeClient) Shutdown() {}
// Enumerate volumes that map to the volumeLocator. Locator fields may be regexp.
// If locator fields are left blank, this will return all volumes.
func (v *volumeClient) Enumerate(locator *api.VolumeLocator,
labels map[string]string) ([]*api.Volume, error) {
var volumes []*api.Volume
req := v.c.Get().Resource(volumePath)
if locator.Name != "" {
req.QueryOption(api.OptName, locator.Name)
}
if len(locator.VolumeLabels) != 0 {
req.QueryOptionLabel(api.OptLabel, locator.VolumeLabels)
}
if len(labels) != 0 {
req.QueryOptionLabel(api.OptConfigLabel, labels)
}
resp := req.Do()
if resp.Error() != nil {
return nil, resp.FormatError()
}
if err := resp.Unmarshal(&volumes); err != nil {
return nil, err
}
return volumes, nil
}
// Enumerate snaps for specified volume
// Count indicates the number of snaps populated.
func (v *volumeClient) SnapEnumerate(ids []string,
snapLabels map[string]string) ([]*api.Volume, error) {
var volumes []*api.Volume
request := v.c.Get().Resource(snapPath)
for _, id := range ids {
request.QueryOption(api.OptVolumeID, id)
}
if len(snapLabels) != 0 {
request.QueryOptionLabel(api.OptLabel, snapLabels)
}
if err := request.Do().Unmarshal(&volumes); err != nil {
return nil, err
}
return volumes, nil
}
// Attach map device to the host.
// On success the devicePath specifies location where the device is exported
// Errors ErrEnoEnt, ErrVolAttached may be returned.
func (v *volumeClient) Attach(volumeID string, attachOptions map[string]string) (string, error) {
response, err := v.doVolumeSetGetResponse(
volumeID,
&api.VolumeSetRequest{
Action: &api.VolumeStateAction{
Attach: api.VolumeActionParam_VOLUME_ACTION_PARAM_ON,
},
Options: attachOptions,
},
)
if err != nil {
return "", err
}
if response.Volume != nil {
if response.Volume.Spec.Encrypted {
return response.Volume.SecureDevicePath, nil
} else {
return response.Volume.DevicePath, nil
}
}
return "", nil
}
// Detach device from the host.
// Errors ErrEnoEnt, ErrVolDetached may be returned.
func (v *volumeClient) Detach(volumeID string, options map[string]string) error {
return v.doVolumeSet(
volumeID,
&api.VolumeSetRequest{
Action: &api.VolumeStateAction{
Attach: api.VolumeActionParam_VOLUME_ACTION_PARAM_OFF,
},
Options: options,
},
)
}
func (v *volumeClient) MountedAt(mountPath string) string {
return ""
}
// Mount volume at specified path
// Errors ErrEnoEnt, ErrVolDetached may be returned.
func (v *volumeClient) Mount(volumeID string, mountPath string, options map[string]string) error {
return v.doVolumeSet(
volumeID,
&api.VolumeSetRequest{
Action: &api.VolumeStateAction{
Mount: api.VolumeActionParam_VOLUME_ACTION_PARAM_ON,
MountPath: mountPath,
},
Options: options,
},
)
}
// Unmount volume at specified path
// Errors ErrEnoEnt, ErrVolDetached may be returned.
func (v *volumeClient) Unmount(volumeID string, mountPath string, options map[string]string) error {
return v.doVolumeSet(
volumeID,
&api.VolumeSetRequest{
Action: &api.VolumeStateAction{
Mount: api.VolumeActionParam_VOLUME_ACTION_PARAM_OFF,
MountPath: mountPath,
},
Options: options,
},
)
}
// Update volume
func (v *volumeClient) Set(volumeID string, locator *api.VolumeLocator,
spec *api.VolumeSpec) error {
return v.doVolumeSet(
volumeID,
&api.VolumeSetRequest{
Locator: locator,
Spec: spec,
},
)
}
func (v *volumeClient) doVolumeSet(volumeID string,
request *api.VolumeSetRequest) error {
_, err := v.doVolumeSetGetResponse(volumeID, request)
return err
}
func (v *volumeClient) doVolumeSetGetResponse(volumeID string,
request *api.VolumeSetRequest) (*api.VolumeSetResponse, error) {
response := &api.VolumeSetResponse{}
if err := v.c.Put().Resource(volumePath).Instance(volumeID).Body(request).Do().Unmarshal(response); err != nil {
return nil, err
}
if response.VolumeResponse != nil && response.VolumeResponse.Error != "" {
return nil, errors.New(response.VolumeResponse.Error)
}
return response, nil
}
// Quiesce quiesces volume i/o
func (v *volumeClient) Quiesce(
volumeID string,
timeoutSec uint64,
quiesceID string,
) error {
response := &api.VolumeResponse{}
req := v.c.Post().Resource(volumePath + "/quiesce").Instance(volumeID)
req.QueryOption(api.OptTimeoutSec, strconv.FormatUint(timeoutSec, 10))
req.QueryOption(api.OptQuiesceID, quiesceID)
if err := req.Do().Unmarshal(response); err != nil {
return err
}
if response.Error != "" {
return errors.New(response.Error)
}
return nil
}
// Unquiesce un-quiesces volume i/o
func (v *volumeClient) Unquiesce(volumeID string) error {
response := &api.VolumeResponse{}
req := v.c.Post().Resource(volumePath + "/unquiesce").Instance(volumeID)
if err := req.Do().Unmarshal(response); err != nil {
return err
}
if response.Error != "" {
return errors.New(response.Error)
}
return nil
}
// CredsEnumerate enumerates configured credentials in the cluster
func (v *volumeClient) CredsEnumerate() (map[string]interface{}, error) {
creds := make(map[string]interface{}, 0)
err := v.c.Get().Resource(api.OsdCredsPath).Do().Unmarshal(&creds)
return creds, err
}
// CredsCreate creates credentials for a given cloud provider
func (v *volumeClient) CredsCreate(params map[string]string) (string, error) {
createResponse := api.CredCreateResponse{}
request := &api.CredCreateRequest{
InputParams: params,
}
req := v.c.Post().Resource(api.OsdCredsPath).Body(request)
response := req.Do()
if response.Error() != nil {
return "", response.FormatError()
}
if err := response.Unmarshal(&createResponse); err != nil {
return "", err
}
return createResponse.UUID, nil
}
// CredsDelete deletes the credential with given UUID
func (v *volumeClient) CredsDelete(uuid string) error {
req := v.c.Delete().Resource(api.OsdCredsPath).Instance(uuid)
response := req.Do()
if response.Error() != nil {
return response.FormatError()
}
return nil
}
// CredsValidate validates the credential by accessing the cloud
// provider with the given credential
func (v *volumeClient) CredsValidate(uuid string) error {
req := v.c.Put().Resource(api.OsdCredsPath + "/validate").Instance(uuid)
response := req.Do()
if response.Error() != nil {
return response.FormatError()
}
return nil
}
// CloudBackupCreate uploads snapshot of a volume to cloud
func (v *volumeClient) CloudBackupCreate(
input *api.CloudBackupCreateRequest,
) error {
req := v.c.Post().Resource(api.OsdBackupPath).Body(input)
response := req.Do()
if response.Error() != nil {
return response.FormatError()
}
return nil
}
// CloudBackupRestore downloads a cloud backup to a newly created volume
func (v *volumeClient) CloudBackupRestore(
input *api.CloudBackupRestoreRequest,
) (*api.CloudBackupRestoreResponse, error) {
restoreResponse := &api.CloudBackupRestoreResponse{}
req := v.c.Post().Resource(api.OsdBackupPath + "/restore").Body(input)
response := req.Do()
if response.Error() != nil {
return nil, response.FormatError()
}
if err := response.Unmarshal(&restoreResponse); err != nil {
return nil, err
}
return restoreResponse, nil
}
// CloudBackupEnumerate lists the backups for a given cluster/credential/volumeID
func (v *volumeClient) CloudBackupEnumerate(
input *api.CloudBackupEnumerateRequest,
) (*api.CloudBackupEnumerateResponse, error) {
enumerateResponse := &api.CloudBackupEnumerateResponse{}
req := v.c.Get().Resource(api.OsdBackupPath).Body(input)
response := req.Do()
if response.Error() != nil {
return nil, response.FormatError()
}
if err := response.Unmarshal(&enumerateResponse); err != nil {
return nil, err
}
return enumerateResponse, nil
}
// CloudBackupDelete deletes the backups in cloud
func (v *volumeClient) CloudBackupDelete(
input *api.CloudBackupDeleteRequest,
) error {
req := v.c.Delete().Resource(api.OsdBackupPath).Body(input)
response := req.Do()
if response.Error() != nil {
return response.FormatError()
}
return nil
}
// CloudBackupDeleteAll deletes all the backups for a volume in cloud
func (v *volumeClient) CloudBackupDeleteAll(
input *api.CloudBackupDeleteAllRequest,
) error {
req := v.c.Delete().Resource(api.OsdBackupPath + "/all").Body(input)
response := req.Do()
if response.Error() != nil {
return response.FormatError()
}
return nil
}
// CloudBackupStatus gets the most recent status of backup/restores
func (v *volumeClient) CloudBackupStatus(
input *api.CloudBackupStatusRequest,
) (*api.CloudBackupStatusResponse, error) {
statusResponse := &api.CloudBackupStatusResponse{}
req := v.c.Get().Resource(api.OsdBackupPath + "/status").Body(input)
response := req.Do()
if response.Error() != nil {
return nil, response.FormatError()
}
if err := response.Unmarshal(&statusResponse); err != nil {
return nil, err
}
return statusResponse, nil
}
// CloudBackupCatalog displays listing of backup content
func (v *volumeClient) CloudBackupCatalog(
input *api.CloudBackupCatalogRequest,
) (*api.CloudBackupCatalogResponse, error) {
catalogResponse := &api.CloudBackupCatalogResponse{}
req := v.c.Get().Resource(api.OsdBackupPath + "/catalog").Body(input)
response := req.Do()
if response.Error() != nil {
return nil, response.FormatError()
}
if err := response.Unmarshal(&catalogResponse); err != nil {
return nil, err
}
return catalogResponse, nil
}
// CloudBackupHistory displays past backup/restore operations in the cluster
func (v *volumeClient) CloudBackupHistory(
input *api.CloudBackupHistoryRequest,
) (*api.CloudBackupHistoryResponse, error) {
historyResponse := &api.CloudBackupHistoryResponse{}
req := v.c.Get().Resource(api.OsdBackupPath + "/history").Body(input)
response := req.Do()
if response.Error() != nil {
return nil, response.FormatError()
}
if err := response.Unmarshal(&historyResponse); err != nil {
return nil, err
}
return historyResponse, nil
}
// CloudBackupStateChange requests a state transition
// (pause/resume/stop) for a currently running backup
func (v *volumeClient) CloudBackupStateChange(
input *api.CloudBackupStateChangeRequest,
) error {
req := v.c.Put().Resource(api.OsdBackupPath + "/statechange").Body(input)
response := req.Do()
if response.Error() != nil {
return response.FormatError()
}
return nil
}
// CloudBackupSchedCreate for a volume creates a schedule to backup volume to cloud
func (v *volumeClient) CloudBackupSchedCreate(
input *api.CloudBackupSchedCreateRequest,
) (*api.CloudBackupSchedCreateResponse, error) {
createResponse := &api.CloudBackupSchedCreateResponse{}
req := v.c.Post().Resource(api.OsdBackupPath + "/sched").Body(input)
response := req.Do()
if response.Error() != nil {
return nil, response.FormatError()
}
if err := response.Unmarshal(&createResponse); err != nil {
return nil, err
}
return createResponse, nil
}
// CloudBackupSchedDelete delete a volume's cloud backup-schedule
func (v *volumeClient) CloudBackupSchedDelete(
input *api.CloudBackupSchedDeleteRequest,
) error {
req := v.c.Delete().Resource(api.OsdBackupPath + "/sched").Body(input)
response := req.Do()
if response.Error() != nil {
return response.FormatError()
}
return nil
}
// CloudBackupSchedEnumerate enumerates the configured backup-schedules in the cluster
func (v *volumeClient) CloudBackupSchedEnumerate() (*api.CloudBackupSchedEnumerateResponse, error) {
enumerateResponse := &api.CloudBackupSchedEnumerateResponse{}
req := v.c.Get().Resource(api.OsdBackupPath + "/sched")
response := req.Do()
if response.Error() != nil {
return nil, response.FormatError()
}
if err := response.Unmarshal(enumerateResponse); err != nil {
return nil, err
}
return enumerateResponse, nil
}
func (v *volumeClient) SnapshotGroup(groupID string, labels map[string]string) (*api.GroupSnapCreateResponse, error) {
response := &api.GroupSnapCreateResponse{}
request := &api.GroupSnapCreateRequest{
Id: groupID,
Labels: labels,
}
req := v.c.Post().Resource(snapPath + "/snapshotgroup").Body(request)
res := req.Do()
if res.Error() != nil {
return nil, res.FormatError()
}
if err := res.Unmarshal(&response); err != nil {
return nil, err
}
return response, nil
}
func (v *volumeClient) CloudMigrateStart(request *api.CloudMigrateStartRequest) error {
req := v.c.Post().Resource(api.OsdMigrateStartPath).Body(request)
response := req.Do()
if response.Error() != nil {
return response.FormatError()
}
return nil
}
func (v *volumeClient) CloudMigrateCancel(request *api.CloudMigrateCancelRequest) error {
req := v.c.Post().Resource(api.OsdMigrateCancelPath).Body(request)
response := req.Do()
if response.Error() != nil {
return response.FormatError()
}
return nil
}
func (v *volumeClient) CloudMigrateStatus() (*api.CloudMigrateStatusResponse, error) {
statusResponse := &api.CloudMigrateStatusResponse{}
req := v.c.Get().Resource(api.OsdMigrateStatusPath)
response := req.Do()
if response.Error() != nil {
return nil, response.FormatError()
}
if err := response.Unmarshal(statusResponse); err != nil {
return nil, err
}
return statusResponse, nil
}
| 1 | 7,209 | nit: use the OptSubFolder constants. Also in the client you are using "maxdepth" but in the server the constant for "depth" is being used. | libopenstorage-openstorage | go |
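To make the review point above concrete, here is a minimal sketch of keeping the client and the server handler on the same catalog query keys by sharing the Opt* constants. The constant values shown ("subfolder", "depth") and their placement are assumptions for illustration, not the actual openstorage/api definitions.

// Sketch only: assumed constant values; the real names live in the api package.
// The point is that the client must send exactly the keys the server handler reads.
const (
	OptCatalogSubFolder = "subfolder"
	OptCatalogMaxDepth  = "depth" // if the server reads "depth", the client must send "depth", not "maxdepth"
)

// Catalog asks the server for the directory catalog of a volume, passing the
// optional subfolder and depth restrictions as shared query options.
func (v *volumeClient) Catalog(id, subfolder, maxDepth string) (api.CatalogResponse, error) {
	var catalog api.CatalogResponse
	req := v.c.Get().Resource(volumePath + "/catalog").Instance(id)
	req.QueryOption(OptCatalogSubFolder, subfolder)
	req.QueryOption(OptCatalogMaxDepth, maxDepth)
	if err := req.Do().Unmarshal(&catalog); err != nil {
		return catalog, err
	}
	return catalog, nil
}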
@@ -76,8 +76,8 @@ describe('Config file', () => {
test('parse docker.yaml', () => {
const config = new Config(parseConfigFile(resolveConf('docker')));
checkDefaultUplink(config);
- expect(config.storage).toBe('/verdaccio/storage');
- expect(config.auth.htpasswd.file).toBe('/verdaccio/conf/htpasswd');
+ expect(config.storage).toBe('/verdaccio/storage/data');
+ expect(config.auth.htpasswd.file).toBe('/verdaccio/storage/htpasswd');
checkDefaultConfPackages(config);
});
| 1 | import path from 'path';
import _ from 'lodash';
import Config from '../../../src/lib/config';
import {parseConfigFile} from '../../../src/lib/utils';
import {DEFAULT_REGISTRY, DEFAULT_UPLINK, ROLES, WEB_TITLE} from '../../../src/lib/constants';
const resolveConf = (conf) => path.join(__dirname, `../../../conf/${conf}.yaml`);
require('../../../src/lib/logger').setup([]);
const checkDefaultUplink = (config) => {
expect(_.isObject(config.uplinks[DEFAULT_UPLINK])).toBeTruthy();
expect(config.uplinks[DEFAULT_UPLINK].url).toMatch(DEFAULT_REGISTRY);
};
const checkDefaultConfPackages = (config) => {
//auth
expect(_.isObject(config.auth)).toBeTruthy();
expect(_.isObject(config.auth.htpasswd)).toBeTruthy();
expect(config.auth.htpasswd.file).toMatch(/htpasswd/);
//web
expect(_.isObject(config.web)).toBeTruthy();
expect(config.web.title).toBe(WEB_TITLE);
expect(config.web.enable).toBeUndefined();
// packages
expect(_.isObject(config.packages)).toBeTruthy();
expect(Object.keys(config.packages).join('|')).toBe('@*/*|**');
expect(config.packages['@*/*'].access).toBeDefined();
expect(config.packages['@*/*'].access).toContainEqual(ROLES.$ALL);
expect(config.packages['@*/*'].publish).toBeDefined();
expect(config.packages['@*/*'].publish).toContainEqual(ROLES.$AUTH);
expect(config.packages['@*/*'].proxy).toBeDefined();
expect(config.packages['@*/*'].proxy).toContainEqual(DEFAULT_UPLINK);
expect(config.packages['**'].access).toBeDefined();
expect(config.packages['**'].access).toContainEqual(ROLES.$ALL);
expect(config.packages['**'].publish).toBeDefined();
expect(config.packages['**'].publish).toContainEqual(ROLES.$AUTH);
expect(config.packages['**'].proxy).toBeDefined();
expect(config.packages['**'].proxy,).toContainEqual(DEFAULT_UPLINK);
// uplinks
expect(config.uplinks[DEFAULT_UPLINK]).toBeDefined();
expect(config.uplinks[DEFAULT_UPLINK].url).toEqual(DEFAULT_REGISTRY);
// audit
expect(config.middlewares).toBeDefined();
expect(config.middlewares.audit).toBeDefined();
expect(config.middlewares.audit.enabled).toBeTruthy();
// logs
expect(config.logs).toBeDefined();
expect(config.logs[0].type).toEqual('stdout');
expect(config.logs[0].format).toEqual('pretty');
expect(config.logs[0].level).toEqual('http');
//must not be enabled by default
expect(config.notify).toBeUndefined();
expect(config.store).toBeUndefined();
expect(config.publish).toBeUndefined();
expect(config.url_prefix).toBeUndefined();
expect(config.url_prefix).toBeUndefined();
};
describe('Config file', () => {
beforeAll(function() {
this.config = new Config(parseConfigFile(resolveConf('full')));
});
describe('Config file', () => {
test('parse full.yaml', () => {
const config = new Config(parseConfigFile(resolveConf('full')));
checkDefaultUplink(config);
expect(config.storage).toBe('./storage');
checkDefaultConfPackages(config);
});
test('parse docker.yaml', () => {
const config = new Config(parseConfigFile(resolveConf('docker')));
checkDefaultUplink(config);
expect(config.storage).toBe('/verdaccio/storage');
expect(config.auth.htpasswd.file).toBe('/verdaccio/conf/htpasswd');
checkDefaultConfPackages(config);
});
test('parse default.yaml', () => {
const config = new Config(parseConfigFile(resolveConf('default')));
checkDefaultUplink(config);
expect(config.storage).toBe('./storage');
expect(config.auth.htpasswd.file).toBe('./htpasswd');
checkDefaultConfPackages(config);
});
});
});
| 1 | 18,364 | I also moved this to `/verdaccio/storage` since it's written at runtime, and the approach here is to make the configuration read-only by default. | verdaccio-verdaccio | js
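For reference, a sketch of the docker.yaml layout this test now expects. The two paths come straight from the assertions in the patch, but the surrounding file contents are an assumption, not the verbatim conf/docker.yaml:

# assumed excerpt of conf/docker.yaml, not the full file
storage: /verdaccio/storage/data
auth:
  htpasswd:
    file: /verdaccio/storage/htpasswd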
@@ -318,13 +318,13 @@ public class NodeJSGapicContext extends GapicContext implements NodeJSContext {
resourceTypeName = "a " + jsTypeName(resourceType);
}
return callbackMessage
- + "\n@returns {Stream|gax.EventEmitter}\n"
+ + "\n@returns {Stream|Promise}\n"
+ " An object stream which emits "
+ resourceTypeName
+ " on 'data' event.\n"
+ " When the callback is specified or streaming is suppressed through options,\n"
- + " it will return an event emitter to handle the call status and the callback\n"
- + " will be called with the response object.";
+ + " it will return a promise that resolves to the response object. The promise\n"
+ + " has cancel method which cancels the ongoing API call.";
}
MessageType returnMessageType = method.getOutputMessage(); | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.nodejs;
import com.google.api.codegen.GapicContext;
import com.google.api.codegen.config.ApiConfig;
import com.google.api.codegen.config.MethodConfig;
import com.google.api.codegen.transformer.ApiMethodTransformer;
import com.google.api.codegen.transformer.GrpcStubTransformer;
import com.google.api.codegen.transformer.MethodTransformerContext;
import com.google.api.codegen.transformer.ModelTypeTable;
import com.google.api.codegen.transformer.SurfaceTransformerContext;
import com.google.api.codegen.transformer.nodejs.NodeJSFeatureConfig;
import com.google.api.codegen.transformer.nodejs.NodeJSModelTypeNameConverter;
import com.google.api.codegen.transformer.nodejs.NodeJSSurfaceNamer;
import com.google.api.codegen.util.nodejs.NodeJSTypeTable;
import com.google.api.codegen.viewmodel.ApiMethodView;
import com.google.api.codegen.viewmodel.GrpcStubView;
import com.google.api.tools.framework.aspects.documentation.model.DocumentationUtil;
import com.google.api.tools.framework.aspects.documentation.model.ElementDocumentationAttribute;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.FieldSelector;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.MessageType;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.Model;
import com.google.api.tools.framework.model.ProtoContainerElement;
import com.google.api.tools.framework.model.ProtoElement;
import com.google.api.tools.framework.model.ProtoFile;
import com.google.api.tools.framework.model.TypeRef;
import com.google.api.tools.framework.model.TypeRef.Cardinality;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.protobuf.DescriptorProtos.FieldDescriptorProto.Type;
import java.util.Collections;
import java.util.List;
import javax.annotation.Nullable;
/**
* A GapicContext specialized for NodeJS.
*/
public class NodeJSGapicContext extends GapicContext implements NodeJSContext {
private GrpcStubTransformer grpcStubTransformer = new GrpcStubTransformer();
NodeJSSurfaceNamer namer;
public NodeJSGapicContext(Model model, ApiConfig apiConfig) {
super(model, apiConfig);
namer = new NodeJSSurfaceNamer(getApiConfig().getPackageName());
}
// Snippet Helpers
// ===============
/**
* Return ApiMethodView for sample gen.
*
* NOTE: Temporary solution to use MVVM with just sample gen. This class
* will eventually go away when code gen also converts to MVVM.
*/
public ApiMethodView getApiMethodView(Interface service, Method method) {
SurfaceTransformerContext context = getSurfaceTransformerContextFromService(service);
MethodTransformerContext methodContext = context.asMethodContext(method);
ApiMethodTransformer apiMethodTransformer = new ApiMethodTransformer();
return apiMethodTransformer.generateDynamicLangApiMethod(methodContext);
}
/**
* Return GrpcStubViews for mixins.
*
* NOTE: Temporary solution to use MVVM with just sample gen. This class
* will eventually go away when code gen also converts to MVVM.
*/
public List<GrpcStubView> getStubs(Interface service) {
SurfaceTransformerContext context = getSurfaceTransformerContextFromService(service);
return grpcStubTransformer.generateGrpcStubs(context);
}
public GrpcStubView getStubForMethod(Interface service, Method method) {
SurfaceTransformerContext context = getSurfaceTransformerContextFromService(service);
Interface targetInterface = context.asMethodContext(method).getTargetInterface();
return grpcStubTransformer.generateGrpcStub(
context, targetInterface, Collections.singletonList(method));
}
private SurfaceTransformerContext getSurfaceTransformerContextFromService(Interface service) {
ModelTypeTable modelTypeTable =
new ModelTypeTable(
new NodeJSTypeTable(getApiConfig().getPackageName()),
new NodeJSModelTypeNameConverter(getApiConfig().getPackageName()));
return SurfaceTransformerContext.create(
service, getApiConfig(), modelTypeTable, namer, new NodeJSFeatureConfig());
}
public String filePath(ProtoFile file) {
return file.getSimpleName().replace(".proto", "_pb2.js");
}
public String propertyName(Field field) {
return namer.getVariableName(field);
}
public String fieldSelectorName(FieldSelector fieldSelector) {
ImmutableList.Builder<String> names = ImmutableList.builder();
for (Field field : fieldSelector.getFields()) {
names.add(propertyName(field));
}
return Joiner.on(".").join(names.build());
}
/**
* Return comments lines for a given proto element, extracted directly from the proto doc
*/
public List<String> defaultComments(ProtoElement element) {
if (!element.hasAttribute(ElementDocumentationAttribute.KEY)) {
return ImmutableList.<String>of();
}
return convertToCommentedBlock(
JSDocCommentFixer.jsdocify(DocumentationUtil.getScopedDescription(element)));
}
/**
* The package name of the grpc module for the API.
*/
public String grpcClientName(Interface service) {
return "grpc-" + service.getFile().getFullName().replace('.', '-');
}
public boolean isGcloud() {
return NodeJSUtils.isGcloud(getApiConfig());
}
/**
* The namespace (full package name) for the service.
*/
public String getNamespace(Interface service) {
String fullName = service.getFullName();
int slash = fullName.lastIndexOf('.');
return fullName.substring(0, slash);
}
/**
* The name for the module for this vkit module. This assumes that the service's
* full name will be in the format of 'google.some.apiname.version.ServiceName',
* and extracts the 'apiname' and 'version' part and combine them to lower-camelcased
* style (like pubsubV1).
*/
public String getModuleName(Interface service) {
List<String> names = Splitter.on(".").splitToList(service.getFullName());
return names.get(names.size() - 3) + lowerUnderscoreToUpperCamel(names.get(names.size() - 2));
}
/**
* Returns the major version part in the API namespace. This assumes that the service's
* full name will be in the format of 'google.some.apiname.version.ServiceName', and
* extracts the 'version' part.
*/
public String getApiVersion(Interface service) {
List<String> names = Splitter.on(".").splitToList(service.getFullName());
return names.get(names.size() - 2);
}
/**
* Returns the filename for documenting messages.
*/
public String getDocFilename(ProtoFile file) {
String filePath = file.getSimpleName().replace(".proto", ".js");
if (isExternalFile(file)) {
filePath = filePath.replaceAll("/", "_");
} else {
int lastSlash = filePath.lastIndexOf('/');
if (lastSlash >= 0) {
filePath = filePath.substring(lastSlash + 1);
}
}
return "doc_" + filePath;
}
/**
* Returns true if the proto file is external to the current package.
* Currently, it only checks the file path and thinks it is external if
* the file is well-known common protos.
*/
public boolean isExternalFile(ProtoFile file) {
String filePath = file.getSimpleName();
for (String commonPath : COMMON_PROTO_PATHS) {
if (filePath.startsWith(commonPath)) {
return true;
}
}
return false;
}
public String getFileURL(ProtoFile file) {
String filePath = file.getSimpleName();
if (filePath.startsWith("google/protobuf")) {
return "https://github.com/google/protobuf/blob/master/src/" + filePath;
} else {
return "https://github.com/googleapis/googleapis/blob/master/" + filePath;
}
}
/**
* Returns type information for a field in JSDoc style.
*/
private String fieldTypeCardinalityComment(Field field) {
TypeRef type = field.getType();
String cardinalityComment = "";
if (type.getCardinality() == Cardinality.REPEATED) {
if (type.isMap()) {
String keyType = jsTypeName(type.getMapKeyField().getType());
String valueType = jsTypeName(type.getMapValueField().getType());
return String.format("Object.<%s, %s>", keyType, valueType);
} else {
cardinalityComment = "[]";
}
}
String typeComment = jsTypeName(field.getType());
return String.format("%s%s", typeComment, cardinalityComment);
}
/**
* Returns a JSDoc comment string for the field as a parameter to a function.
*/
private String fieldParamComment(Field field, String paramComment, boolean isOptional) {
String commentType = fieldTypeCardinalityComment(field);
String fieldName = wrapIfKeywordOrBuiltIn(lowerUnderscoreToLowerCamel(field.getSimpleName()));
if (isOptional) {
fieldName = "options." + fieldName;
commentType = commentType + "=";
}
return fieldComment(
String.format("@param {%s} %s", commentType, fieldName), paramComment, field);
}
/**
* Returns a JSDoc comment string for the field as an attribute of a message.
*/
public List<String> fieldPropertyComment(Field field) {
String commentType = fieldTypeCardinalityComment(field);
String fieldName = propertyName(field);
return convertToCommentedBlock(
fieldComment(String.format("@property {%s} %s", commentType, fieldName), null, field));
}
private String fieldComment(String comment, String paramComment, Field field) {
if (paramComment == null) {
paramComment = DocumentationUtil.getScopedDescription(field);
}
if (!Strings.isNullOrEmpty(paramComment)) {
paramComment = JSDocCommentFixer.jsdocify(paramComment);
comment += "\n " + paramComment.replaceAll("(\\r?\\n)", "\n ");
}
if (field.getType().isMessage() && !field.getType().isMap()) {
if (!Strings.isNullOrEmpty(paramComment)) {
comment += "\n";
}
comment +=
"\n This object should have the same structure as "
+ linkForMessage(field.getType().getMessageType());
} else if (field.getType().isEnum()) {
if (!Strings.isNullOrEmpty(paramComment)) {
comment += "\n";
}
comment +=
"\n The number should be among the values of "
+ linkForMessage(field.getType().getEnumType());
}
return comment + "\n";
}
/**
* Return JSDoc callback comment and return type comment for the given method.
*/
@Nullable
private String returnTypeComment(Method method, MethodConfig config) {
if (config.isPageStreaming()) {
String callbackMessage =
"@param {function(?Error, ?"
+ jsTypeName(method.getOutputType())
+ ", ?"
+ jsTypeName(config.getPageStreaming().getResponseTokenField().getType())
+ ")=} callback\n"
+ " When specified, the results are not streamed but this callback\n"
+ " will be called with the response object representing "
+ linkForMessage(method.getOutputMessage())
+ ".\n"
+ " The third item will be set if the response contains the token for the further results\n"
+ " and can be reused to `pageToken` field in the options in the next request.";
TypeRef resourceType = config.getPageStreaming().getResourcesField().getType();
String resourceTypeName;
if (resourceType.isMessage()) {
resourceTypeName =
"an object representing\n " + linkForMessage(resourceType.getMessageType());
} else if (resourceType.isEnum()) {
resourceTypeName = "a number of\n " + linkForMessage(resourceType.getEnumType());
} else {
resourceTypeName = "a " + jsTypeName(resourceType);
}
return callbackMessage
+ "\n@returns {Stream|gax.EventEmitter}\n"
+ " An object stream which emits "
+ resourceTypeName
+ " on 'data' event.\n"
+ " When the callback is specified or streaming is suppressed through options,\n"
+ " it will return an event emitter to handle the call status and the callback\n"
+ " will be called with the response object.";
}
MessageType returnMessageType = method.getOutputMessage();
boolean isEmpty = returnMessageType.getFullName().equals("google.protobuf.Empty");
String classInfo = jsTypeName(method.getOutputType());
String callbackType =
isEmpty ? "function(?Error)" : String.format("function(?Error, ?%s)", classInfo);
String callbackMessage =
"@param {"
+ callbackType
+ "=} callback\n"
+ " The function which will be called with the result of the API call.";
if (!isEmpty) {
callbackMessage +=
"\n\n The second parameter to the callback is an object representing "
+ linkForMessage(returnMessageType);
}
String returnMessage =
"@returns {"
+ (config.isBundling() ? "gax.BundleEventEmitter" : "gax.EventEmitter")
+ "} - the event emitter to handle the call\n"
+ " status.";
if (config.isBundling()) {
returnMessage +=
" When isBundling: false is specified in the options, it still returns\n"
+ " a gax.BundleEventEmitter but the API is immediately invoked, so it behaves same\n"
+ " as a gax.EventEmitter does.";
}
return callbackMessage + "\n" + returnMessage;
}
/**
* Return the list of messages within element which should be documented in Node.JS.
*/
public ImmutableList<MessageType> filterDocumentingMessages(ProtoContainerElement element) {
ImmutableList.Builder<MessageType> builder = ImmutableList.builder();
for (MessageType msg : element.getMessages()) {
// Doesn't have to document map entries in Node.JS because Object is used.
if (!msg.isMapEntry()) {
builder.add(msg);
}
}
return builder.build();
}
/**
* Return comments lines for a given method, consisting of proto doc and parameter type
* documentation.
*/
public List<String> methodComments(Interface service, Method msg) {
MethodConfig config = getApiConfig().getInterfaceConfig(service).getMethodConfig(msg);
// Generate parameter types
StringBuilder paramTypesBuilder = new StringBuilder();
for (Field field : config.getRequiredFields()) {
paramTypesBuilder.append(fieldParamComment(field, null, false));
}
paramTypesBuilder.append(
"@param {Object=} options\n"
+ " Optional parameters. You can override the default settings for this call, e.g, timeout,\n"
+ " retries, paginations, etc. See [gax.CallOptions]{@link "
+ "https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.");
Iterable<Field> optionalParams = removePageTokenFromFields(config.getOptionalFields(), config);
if (optionalParams.iterator().hasNext()) {
paramTypesBuilder.append(
"\n\n In addition, options may contain the following optional parameters.\n");
for (Field field : optionalParams) {
if (config.isPageStreaming()
&& field.equals((config.getPageStreaming().getPageSizeField()))) {
paramTypesBuilder.append(
fieldParamComment(
field,
"The maximum number of resources contained in the underlying API\n"
+ "response. If page streaming is performed per-resource, this\n"
+ "parameter does not affect the return value. If page streaming is\n"
+ "performed per-page, this determines the maximum number of\n"
+ "resources in a page.",
true));
} else {
paramTypesBuilder.append(fieldParamComment(field, null, true));
}
}
}
String paramTypes = paramTypesBuilder.toString();
String returnType = returnTypeComment(msg, config);
// Generate comment contents
StringBuilder contentBuilder = new StringBuilder();
if (msg.hasAttribute(ElementDocumentationAttribute.KEY)) {
contentBuilder.append(
JSDocCommentFixer.jsdocify(DocumentationUtil.getScopedDescription(msg)));
if (!Strings.isNullOrEmpty(paramTypes)) {
contentBuilder.append("\n\n");
}
}
contentBuilder.append(paramTypes);
if (returnType != null) {
contentBuilder.append("\n" + returnType);
}
return convertToCommentedBlock(contentBuilder.toString());
}
/**
* Return a non-conflicting safe name if name is a JS reserved word.
*/
public String wrapIfKeywordOrBuiltIn(String name) {
if (KEYWORD_BUILT_IN_SET.contains(name)) {
return name + "_";
}
return name;
}
/**
* Returns the name of JS type for the given typeRef.
*/
public String jsTypeName(TypeRef typeRef) {
switch (typeRef.getKind()) {
case TYPE_MESSAGE:
return "Object";
case TYPE_ENUM:
return "number";
default:
{
String name = PRIMITIVE_TYPE_NAMES.get(typeRef.getKind());
if (!Strings.isNullOrEmpty(name)) {
return name;
}
throw new IllegalArgumentException("unknown type kind: " + typeRef.getKind());
}
}
}
/**
* Returns the name of the JS type name for arguejs parameter definitions.
*/
public String getFieldType(Field field) {
TypeRef typeRef = field.getType();
if (typeRef.isMap()) {
return "Object";
}
if (typeRef.getCardinality() == Cardinality.REPEATED) {
return "Array";
}
switch (typeRef.getKind()) {
case TYPE_MESSAGE:
return "Object";
case TYPE_BOOL:
return "Boolean";
case TYPE_STRING:
case TYPE_BYTES:
return "String";
default:
// Numeric types and enums.
return "Number";
}
}
/**
* Returns the JSDoc format of link to the element.
*/
public String linkForMessage(ProtoElement element) {
if (isExternalFile(element.getFile())) {
String fullName = element.getFullName();
return String.format("[%s]{@link external:\"%s\"}", fullName, fullName);
} else {
String simpleName = element.getSimpleName();
return String.format("[%s]{@link %s}", simpleName, simpleName);
}
}
/**
* Returns the JavaScript representation of the function to return the byte length.
*/
public String getByteLengthFunction(Interface service, Method method, TypeRef typeRef) {
switch (typeRef.getKind()) {
case TYPE_MESSAGE:
return "gax.createByteLengthFunction(grpcClients."
+ getStubForMethod(service, method).grpcClientVariableName()
+ "."
+ typeRef.getMessageType().getFullName()
+ ")";
case TYPE_STRING:
case TYPE_BYTES:
return "function(s) { return s.length; }";
default:
// There is no easy way to say the actual length of the numeric fields.
// For now throwing an exception.
throw new IllegalArgumentException(
"Can't determine the byte length function for " + typeRef.getKind());
}
}
/**
* Convert the content string into a commented block that can be directly printed out in the
* generated JS files.
*/
private List<String> convertToCommentedBlock(String content) {
if (Strings.isNullOrEmpty(content)) {
return ImmutableList.<String>of();
}
ImmutableList.Builder<String> builder = ImmutableList.builder();
for (String comment : Splitter.on("\n").splitToList(content)) {
builder.add(comment);
}
return builder.build();
}
// Constants
// =========
/**
* A map from primitive types to its default value.
*/
private static final ImmutableMap<Type, String> DEFAULT_VALUE_MAP =
ImmutableMap.<Type, String>builder()
.put(Type.TYPE_BOOL, "false")
.put(Type.TYPE_DOUBLE, "0.0")
.put(Type.TYPE_FLOAT, "0.0")
.put(Type.TYPE_INT64, "0")
.put(Type.TYPE_UINT64, "0")
.put(Type.TYPE_SINT64, "0")
.put(Type.TYPE_FIXED64, "0")
.put(Type.TYPE_SFIXED64, "0")
.put(Type.TYPE_INT32, "0")
.put(Type.TYPE_UINT32, "0")
.put(Type.TYPE_SINT32, "0")
.put(Type.TYPE_FIXED32, "0")
.put(Type.TYPE_SFIXED32, "0")
.put(Type.TYPE_STRING, "\'\'")
.put(Type.TYPE_BYTES, "\'\'")
.build();
private static final ImmutableMap<Type, String> PRIMITIVE_TYPE_NAMES =
ImmutableMap.<Type, String>builder()
.put(Type.TYPE_BOOL, "boolean")
.put(Type.TYPE_DOUBLE, "number")
.put(Type.TYPE_FLOAT, "number")
.put(Type.TYPE_INT64, "number")
.put(Type.TYPE_UINT64, "number")
.put(Type.TYPE_SINT64, "number")
.put(Type.TYPE_FIXED64, "number")
.put(Type.TYPE_SFIXED64, "number")
.put(Type.TYPE_INT32, "number")
.put(Type.TYPE_UINT32, "number")
.put(Type.TYPE_SINT32, "number")
.put(Type.TYPE_FIXED32, "number")
.put(Type.TYPE_SFIXED32, "number")
.put(Type.TYPE_STRING, "string")
.put(Type.TYPE_BYTES, "string")
.build();
/**
* A set of ECMAScript 2016 reserved words. See
* https://tc39.github.io/ecma262/2016/#sec-reserved-words
*/
private static final ImmutableSet<String> KEYWORD_BUILT_IN_SET =
ImmutableSet.<String>builder()
.add(
"break",
"do",
"in",
"typeof",
"case",
"else",
"instanceof",
"var",
"catch",
"export",
"new",
"void",
"class",
"extends",
"return",
"while",
"const",
"finally",
"super",
"with",
"continue",
"for",
"switch",
"yield",
"debugger",
"function",
"this",
"default",
"if",
"throw",
"delete",
"import",
"try",
"let",
"static",
"enum",
"await",
"implements",
"package",
"protected",
"interface",
"private",
"public",
"null",
"true",
"false",
// common parameters passed to methods.
"options",
"callback",
// parameters used in CallOptions.
"timeout",
"retry",
"flattenPages",
"pageToken",
"isBundling")
.build();
private static final ImmutableSet<String> COMMON_PROTO_PATHS =
ImmutableSet.<String>builder()
.add(
"google/api",
"google/bytestream",
"google/logging/type",
"google/longrunning",
"google/protobuf",
"google/rpc",
"google/type")
.build();
}
| 1 | 18,654 | nit: s/has cancel method/has a cancel method | googleapis-gapic-generator | java |
@@ -5,6 +5,16 @@ describe Quiz do
it { should have_many(:questions).dependent(:destroy) }
+ describe ".with_questions" do
+ it "returns only quizzes with questions" do
+ first_quiz, second_quiz = create_pair(:quiz)
+
+ question = create(:question, quiz: first_quiz)
+
+ expect(Quiz.with_questions).to eq([first_quiz])
+ end
+ end
+
describe "#first_question" do
it "returns the first question" do
quiz = create(:quiz) | 1 | require "rails_helper"
describe Quiz do
it { should validate_presence_of(:title) }
it { should have_many(:questions).dependent(:destroy) }
describe "#first_question" do
it "returns the first question" do
quiz = create(:quiz)
questions = create_list(:question, 2, quiz: quiz)
expect(quiz.first_question).to eq(questions.first)
end
end
describe "#questions" do
it "returns the questions in position order" do
quiz = create(:quiz)
older_question = create(:question, quiz: quiz, position: 2)
newer_question = create(:question, quiz: quiz, position: 1)
expect(quiz.questions).to match([newer_question, older_question])
end
end
describe "#length" do
it "returns the number of questions the quiz has" do
quiz = create(:quiz)
create(:question, quiz: quiz)
create(:question, quiz: quiz)
result = quiz.length
expect(result).to eq(2)
end
end
end
| 1 | 14,705 | Useless assignment to variable - `second_quiz`. Use `_` or `_second_quiz` as a variable name to indicate that it won't be used. | thoughtbot-upcase | rb |
@@ -81,6 +81,9 @@ func (g Gzip) Validate() error {
// used in the Accept-Encoding request headers.
func (Gzip) AcceptEncoding() string { return "gzip" }
+// Suffix returns the filename suffix of precompressed files.
+func (Gzip) Suffix() string { return ".gz" }
+
// NewEncoder returns a new gzip writer.
func (g Gzip) NewEncoder() encode.Encoder {
writer, _ := gzip.NewWriterLevel(nil, g.Level) | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddygzip
import (
"compress/flate"
"fmt"
"strconv"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
"github.com/klauspost/compress/gzip"
)
func init() {
caddy.RegisterModule(Gzip{})
}
// Gzip can create gzip encoders.
type Gzip struct {
Level int `json:"level,omitempty"`
}
// CaddyModule returns the Caddy module information.
func (Gzip) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.encoders.gzip",
New: func() caddy.Module { return new(Gzip) },
}
}
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens.
func (g *Gzip) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
if !d.NextArg() {
continue
}
levelStr := d.Val()
level, err := strconv.Atoi(levelStr)
if err != nil {
return err
}
g.Level = level
}
return nil
}
// Provision provisions g's configuration.
func (g *Gzip) Provision(ctx caddy.Context) error {
if g.Level == 0 {
g.Level = defaultGzipLevel
}
return nil
}
// Validate validates g's configuration.
func (g Gzip) Validate() error {
if g.Level < flate.NoCompression {
return fmt.Errorf("quality too low; must be >= %d", flate.NoCompression)
}
if g.Level > flate.BestCompression {
return fmt.Errorf("quality too high; must be <= %d", flate.BestCompression)
}
return nil
}
// AcceptEncoding returns the name of the encoding as
// used in the Accept-Encoding request headers.
func (Gzip) AcceptEncoding() string { return "gzip" }
// NewEncoder returns a new gzip writer.
func (g Gzip) NewEncoder() encode.Encoder {
writer, _ := gzip.NewWriterLevel(nil, g.Level)
return writer
}
// Informed from http://blog.klauspost.com/gzip-performance-for-go-webservers/
var defaultGzipLevel = 5
// Interface guards
var (
_ encode.Encoding = (*Gzip)(nil)
_ caddy.Provisioner = (*Gzip)(nil)
_ caddy.Validator = (*Gzip)(nil)
_ caddyfile.Unmarshaler = (*Gzip)(nil)
)
| 1 | 16,363 | Is this still needed since there's the `GzipPrecompressed` type? | caddyserver-caddy | go |
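To illustrate the question above: if the module also ships a dedicated precompressed type, the filename suffix arguably belongs on that type rather than on Gzip itself. The shape below is an assumption for illustration; it is not the actual caddy GzipPrecompressed implementation.

// Sketch only: assumed shape of a dedicated precompressed encoding type.
// It reuses Gzip via embedding and owns the precompressed filename suffix.
type GzipPrecompressed struct {
	Gzip
}

// Suffix returns the filename suffix of precompressed gzip files.
func (GzipPrecompressed) Suffix() string { return ".gz" }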
@@ -29,6 +29,13 @@ from PyRegion import PyRegion
from nupic.algorithms.cla_classifier_factory import CLAClassifierFactory
+class _NumCatgoriesNotSpecified(Exception):
+ pass
+
+
+class _UnknownOutput(Exception):
+ pass
+
class CLAClassifierRegion(PyRegion):
""" | 1 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file implements the CLA Classifier region. See the comments in the class
definition of CLAClassifierRegion for a description.
"""
from PyRegion import PyRegion
from nupic.algorithms.cla_classifier_factory import CLAClassifierFactory
class CLAClassifierRegion(PyRegion):
"""
CLAClassifierRegion implements a CLA specific classifier that accepts a binary
input from the level below (the "activationPattern") and information from the
sensor and encoders (the "classification") describing the input to the system
at that time step.
When learning, for every bit in activation pattern, it records a history of the
classification each time that bit was active. The history is bounded by a
maximum allowed age so that old entries are thrown away.
For inference, it takes an ensemble approach. For every active bit in the
activationPattern, it looks up the most likely classification(s) from the
history stored for that bit and then votes across these to get the resulting
classification(s).
The caller can choose to tell the region that the classifications for
iteration N+K should be aligned with the activationPattern for iteration N.
This results in the classifier producing predictions for K steps in advance.
Any number of different K's can be specified, allowing the classifier to learn
and infer multi-step predictions for a number of steps in advance.
"""
@classmethod
def getSpec(cls):
ns = dict(
description=CLAClassifierRegion.__doc__,
singleNodeOnly=True,
# The inputs and outputs are not used in this region because they are
# either sparse vectors or dictionaries and hence don't fit the "vector
# of real" input/output pattern.
# There is a custom compute() function provided that accepts the
# inputs and outputs.
inputs=dict(
categoryIn=dict(
description='Vector of categories of the input sample',
dataType='Real32',
count=0,
required=True,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
bottomUpIn=dict(
description='Belief values over children\'s groups',
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
),
outputs=dict(),
parameters=dict(
learningMode=dict(
description='Boolean (0/1) indicating whether or not a region '
'is in learning mode.',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=1,
accessMode='ReadWrite'),
inferenceMode=dict(
description='Boolean (0/1) indicating whether or not a region '
'is in inference mode.',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
steps=dict(
description='Comma separated list of the desired steps of '
'prediction that the classifier should learn',
dataType="Byte",
count=0,
constraints='',
defaultValue='1',
accessMode='Create'),
alpha=dict(
description='The alpha used to compute running averages of the '
'bucket duty cycles for each activation pattern bit. A lower '
'alpha results in longer term memory',
dataType="Real32",
count=1,
constraints='',
defaultValue=0.001,
accessMode='Create'),
implementation=dict(
description='The classifier implementation to use.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints='enum: py, cpp'),
clVerbosity=dict(
description='An integer that controls the verbosity level, '
'0 means no verbose output, increasing integers '
'provide more verbosity.',
dataType='UInt32',
count=1,
constraints='',
defaultValue=0 ,
accessMode='ReadWrite'),
),
commands=dict()
)
return ns
def __init__(self,
steps='1',
alpha=0.001,
clVerbosity=0,
implementation=None,
):
# Convert the steps designation to a list
self.steps = steps
self.stepsList = eval("[%s]" % (steps))
self.alpha = alpha
self.verbosity = clVerbosity
# Initialize internal structures
self._claClassifier = CLAClassifierFactory.create(
steps=self.stepsList,
alpha=self.alpha,
verbosity=self.verbosity,
implementation=implementation,
)
self.learningMode = True
self.inferenceMode = False
self._initEphemerals()
def _initEphemerals(self):
pass
def initialize(self, dims, splitterMaps):
pass
def clear(self):
self._claClassifier.clear()
def getParameter(self, name, index=-1):
"""
Get the value of the parameter.
@param name -- the name of the parameter to retrieve, as defined
by the Node Spec.
"""
# If any spec parameter name is the same as an attribute, this call
# will get it automatically, e.g. self.learningMode
return PyRegion.getParameter(self, name, index)
def setParameter(self, name, index, value):
"""
Set the value of the parameter.
@param name -- the name of the parameter to update, as defined
by the Node Spec.
@param value -- the value to which the parameter is to be set.
"""
if name == "learningMode":
self.learningMode = bool(int(value))
elif name == "inferenceMode":
self.inferenceMode = bool(int(value))
else:
return PyRegion.setParameter(self, name, index, value)
def reset(self):
pass
def compute(self, inputs, outputs):
"""
Process one input sample.
This method is called by the runtime engine.
We don't use this method in this region because the inputs and outputs don't
fit the standard "vector of reals" used by the engine. Instead, call
the customCompute() method directly
"""
pass
def customCompute(self, recordNum, patternNZ, classification):
"""
Process one input sample.
This method is called by outer loop code outside the nupic-engine. We
use this instead of the nupic engine compute() because our inputs and
outputs aren't fixed size vectors of reals.
Parameters:
--------------------------------------------------------------------
patternNZ: list of the active indices from the output below
classification: dict of the classification information:
bucketIdx: index of the encoder bucket
actValue: actual value going into the encoder
retval: dict containing inference results, one entry for each step in
self.steps. The key is the number of steps, the value is an
array containing the relative likelihood for each bucketIdx
starting from bucketIdx 0.
for example:
{1 : [0.1, 0.3, 0.2, 0.7]
4 : [0.2, 0.4, 0.3, 0.5]}
"""
return self._claClassifier.compute( recordNum=recordNum,
patternNZ=patternNZ,
classification=classification,
learn = self.learningMode,
infer = self.inferenceMode)
if __name__=='__main__':
from nupic.engine import Network
n = Network()
classifier = n.addRegion(
'classifier',
'py.CLAClassifierRegion',
'{ steps: "1,2", maxAge: 1000}'
)
| 1 | 19,247 | this is spelled wrong and also not used so remove it | numenta-nupic | py |
@@ -65,6 +65,7 @@ public abstract class ServerWebExchangeMatchers {
* Matches any exchange
* @return the matcher to use
*/
+ @SuppressWarnings("Convert2Lambda")
public static ServerWebExchangeMatcher anyExchange() {
// we don't use a lambda to ensure a unique equals and hashcode
// which otherwise can cause problems with adding multiple entries to an ordered LinkedHashMap | 1 | /*
* Copyright 2002-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.web.server.util.matcher;
import org.springframework.http.HttpMethod;
import org.springframework.web.server.ServerWebExchange;
import reactor.core.publisher.Mono;
import java.util.ArrayList;
import java.util.List;
/**
* Provides factory methods for creating common {@link ServerWebExchangeMatcher}
* @author Rob Winch
* @since 5.0
*/
public abstract class ServerWebExchangeMatchers {
/**
* Creates a matcher that matches on the specific method and any of the provided patterns.
* @param method the method to match on. If null, any method will be matched
* @param patterns the patterns to match on
* @return the matcher to use
*/
public static ServerWebExchangeMatcher pathMatchers(HttpMethod method, String... patterns) {
List<ServerWebExchangeMatcher> matchers = new ArrayList<>(patterns.length);
for (String pattern : patterns) {
matchers.add(new PathPatternParserServerWebExchangeMatcher(pattern, method));
}
return new OrServerWebExchangeMatcher(matchers);
}
/**
* Creates a matcher that matches on any of the provided patterns.
* @param patterns the patterns to match on
* @return the matcher to use
*/
public static ServerWebExchangeMatcher pathMatchers(String... patterns) {
return pathMatchers(null, patterns);
}
/**
* Creates a matcher that will match on any of the provided matchers
* @param matchers the matchers to match on
* @return the matcher to use
*/
public static ServerWebExchangeMatcher matchers(ServerWebExchangeMatcher... matchers) {
return new OrServerWebExchangeMatcher(matchers);
}
/**
* Matches any exchange
* @return the matcher to use
*/
public static ServerWebExchangeMatcher anyExchange() {
// we don't use a lambda to ensure a unique equals and hashcode
// which otherwise can cause problems with adding multiple entries to an ordered LinkedHashMap
return new ServerWebExchangeMatcher() {
@Override
public Mono<MatchResult> matches(ServerWebExchange exchange) {
return ServerWebExchangeMatcher.MatchResult.match();
}
};
}
private ServerWebExchangeMatchers() {
}
}
| 1 | 13,759 | It might be right here that's the issue ^ | spring-projects-spring-security | java |
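The patch above suppresses the Convert2Lambda inspection because the anonymous class is intentional: as the code comment in the old file notes, the matcher needs a unique equals/hashCode so that multiple entries can live in an ordered LinkedHashMap. A non-capturing lambda is typically handed out as a single cached instance per call site on common JVMs, so two matchers obtained that way can be the very same object, while new ServerWebExchangeMatcher() { ... } always yields a fresh object with identity-based equals/hashCode. A minimal, self-contained Java sketch of the effect, using plain Runnable and a made-up LambdaIdentityDemo class rather than the Spring types (lambda instance caching is JVM-dependent and not guaranteed by the specification):

import java.util.LinkedHashSet;
import java.util.Set;

public class LambdaIdentityDemo {

    static Runnable anonymousMatcher() {
        // a brand-new object on every call -> identity-based equals/hashCode stay unique
        return new Runnable() {
            @Override
            public void run() { }
        };
    }

    static Runnable lambdaMatcher() {
        // non-capturing lambda; HotSpot usually caches one instance per call site
        return () -> { };
    }

    public static void main(String[] args) {
        Set<Runnable> ordered = new LinkedHashSet<>();
        ordered.add(anonymousMatcher());
        ordered.add(anonymousMatcher());
        ordered.add(lambdaMatcher());
        ordered.add(lambdaMatcher());
        // typically prints 3: two distinct anonymous instances, one shared lambda instance
        System.out.println(ordered.size());
    }
}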
@@ -71,8 +71,10 @@ public interface SortedMap<K, V> extends Map<K, V>, Ordered<K> {
/**
* Returns the underlying key-comparator which defines the order of the elements contained in this map.
*
+ * @deprecated Use {@link SortedMap#comparator()} instead
* @return This map's key-comparator.
*/
+ @Deprecated
Comparator<K> keyComparator();
/** | 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2017 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.Tuple2;
import javaslang.control.Option;
import java.util.Comparator;
import java.util.NoSuchElementException;
import java.util.function.*;
/**
* An immutable {@code SortedMap} interface.
*
* @param <K> Key type
* @param <V> Value type
* @author Daniel Dietrich
* @since 2.0.0
*/
public interface SortedMap<K, V> extends Map<K, V>, Ordered<K> {
long serialVersionUID = 1L;
/**
* Narrows a widened {@code SortedMap<? extends K, ? extends V>} to {@code SortedMap<K, V>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
* <p>
* CAUTION: If {@code K} is narrowed, the underlying {@code Comparator} might fail!
*
* @param sortedMap A {@code SortedMap}.
* @param <K> Key type
* @param <V> Value type
* @return the given {@code sortedMap} instance as narrowed type {@code SortedMap<K, V>}.
*/
@SuppressWarnings("unchecked")
static <K, V> SortedMap<K, V> narrow(SortedMap<? extends K, ? extends V> sortedMap) {
return (SortedMap<K, V>) sortedMap;
}
/**
* Same as {@link #bimap(Function, Function)}, using a specific comparator for keys of the codomain of the given
* {@code keyMapper}.
*
* @param <K2> key's component type of the map result
* @param <V2> value's component type of the map result
* @param keyComparator A comparator for keys of type K2
* @param keyMapper a {@code Function} that maps the keys of type {@code K} to keys of type {@code K2}
     * @param valueMapper  a {@code Function} that maps the values of type {@code V} to values of type {@code V2}
* @return a new {@code SortedMap}
* @throws NullPointerException if {@code keyMapper} or {@code valueMapper} is null
*/
<K2, V2> SortedMap<K2, V2> bimap(Comparator<? super K2> keyComparator,
Function<? super K, ? extends K2> keyMapper, Function<? super V, ? extends V2> valueMapper);
/**
* Same as {@link #flatMap(BiFunction)} but using a specific comparator for values of the codomain of the given
* {@code mapper}.
*
* @param keyComparator A comparator for keys of type U
* @param mapper A function which maps key/value pairs to Iterables map entries
* @param <K2> New key type
* @param <V2> New value type
* @return A new Map instance containing mapped entries
*/
<K2, V2> SortedMap<K2, V2> flatMap(Comparator<? super K2> keyComparator, BiFunction<? super K, ? super V, ? extends Iterable<Tuple2<K2, V2>>> mapper);
/**
* Returns the underlying key-comparator which defines the order of the elements contained in this map.
*
* @return This map's key-comparator.
*/
Comparator<K> keyComparator();
/**
* Same as {@link #map(BiFunction)}, using a specific comparator for keys of the codomain of the given
* {@code mapper}.
*
* @param keyComparator A comparator for keys of type U
* @param <K2> key's component type of the map result
* @param <V2> value's component type of the map result
* @param mapper a {@code Function} that maps entries of type {@code (K, V)} to entries of type {@code (K2, V2)}
* @return a new {@code SortedMap}
* @throws NullPointerException if {@code mapper} is null
*/
<K2, V2> SortedMap<K2, V2> map(Comparator<? super K2> keyComparator, BiFunction<? super K, ? super V, Tuple2<K2, V2>> mapper);
// -- Adjusted return types of Map methods
@Override
<K2, V2> SortedMap<K2, V2> bimap(Function<? super K, ? extends K2> keyMapper, Function<? super V, ? extends V2> valueMapper);
@Override
Tuple2<V, ? extends SortedMap<K, V>> computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction);
@Override
Tuple2<Option<V>, ? extends SortedMap<K, V>> computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction);
@Override
SortedMap<K, V> distinct();
@Override
SortedMap<K, V> distinctBy(Comparator<? super Tuple2<K, V>> comparator);
@Override
<U> SortedMap<K, V> distinctBy(Function<? super Tuple2<K, V>, ? extends U> keyExtractor);
@Override
SortedMap<K, V> drop(int n);
@Override
SortedMap<K, V> dropRight(int n);
@Override
SortedMap<K, V> dropUntil(Predicate<? super Tuple2<K, V>> predicate);
@Override
SortedMap<K, V> dropWhile(Predicate<? super Tuple2<K, V>> predicate);
@Override
SortedMap<K, V> filter(Predicate<? super Tuple2<K, V>> predicate);
@Override
SortedMap<K, V> filter(BiPredicate<? super K, ? super V> predicate);
@Override
SortedMap<K, V> filterKeys(Predicate<? super K> predicate);
@Override
SortedMap<K, V> filterValues(Predicate<? super V> predicate);
@Override
SortedMap<K, V> removeAll(BiPredicate<? super K, ? super V> predicate);
@Override
SortedMap<K, V> removeKeys(Predicate<? super K> predicate);
@Override
SortedMap<K, V> removeValues(Predicate<? super V> predicate);
@Override
<K2, V2> SortedMap<K2, V2> flatMap(BiFunction<? super K, ? super V, ? extends Iterable<Tuple2<K2, V2>>> mapper);
@Override
<C> Map<C, ? extends SortedMap<K, V>> groupBy(Function<? super Tuple2<K, V>, ? extends C> classifier);
@Override
Iterator<? extends SortedMap<K, V>> grouped(int size);
@Override
SortedMap<K, V> init();
@Override
Option<? extends SortedMap<K, V>> initOption();
@Override
default boolean isOrdered() {
return true;
}
@Override
SortedSet<K> keySet();
@Override
default Tuple2<K, V> last() {
return max().getOrElseThrow(() -> new NoSuchElementException("last on empty SortedMap"));
}
@Override
<K2, V2> SortedMap<K2, V2> map(BiFunction<? super K, ? super V, Tuple2<K2, V2>> mapper);
@Override
<K2> SortedMap<K2, V> mapKeys(Function<? super K, ? extends K2> keyMapper);
@Override
<K2> SortedMap<K2, V> mapKeys(Function<? super K, ? extends K2> keyMapper, BiFunction<? super V, ? super V, ? extends V> valueMerge);
@Override
<V2> SortedMap<K, V2> mapValues(Function<? super V, ? extends V2> valueMapper);
@Override
SortedMap<K, V> merge(Map<? extends K, ? extends V> that);
@Override
<U extends V> SortedMap<K, V> merge(Map<? extends K, U> that, BiFunction<? super V, ? super U, ? extends V> collisionResolution);
@Override
SortedMap<K, V> orElse(Iterable<? extends Tuple2<K, V>> other);
@Override
SortedMap<K, V> orElse(Supplier<? extends Iterable<? extends Tuple2<K, V>>> supplier);
@Override
Tuple2<? extends SortedMap<K, V>, ? extends SortedMap<K, V>> partition(Predicate<? super Tuple2<K, V>> predicate);
@Override
SortedMap<K, V> peek(Consumer<? super Tuple2<K, V>> action);
@Override
SortedMap<K, V> put(K key, V value);
@Override
SortedMap<K, V> put(Tuple2<? extends K, ? extends V> entry);
@Override
SortedMap<K, V> remove(K key);
@Override
SortedMap<K, V> removeAll(Iterable<? extends K> keys);
@Override
SortedMap<K, V> replace(Tuple2<K, V> currentElement, Tuple2<K, V> newElement);
@Override
SortedMap<K, V> replaceAll(Tuple2<K, V> currentElement, Tuple2<K, V> newElement);
@Override
SortedMap<K, V> retainAll(Iterable<? extends Tuple2<K, V>> elements);
@Override
SortedMap<K, V> scan(Tuple2<K, V> zero, BiFunction<? super Tuple2<K, V>, ? super Tuple2<K, V>, ? extends Tuple2<K, V>> operation);
@Override
Iterator<? extends SortedMap<K, V>> slideBy(Function<? super Tuple2<K, V>, ?> classifier);
@Override
Iterator<? extends SortedMap<K, V>> sliding(int size);
@Override
Iterator<? extends SortedMap<K, V>> sliding(int size, int step);
@Override
Tuple2<? extends SortedMap<K, V>, ? extends SortedMap<K, V>> span(Predicate<? super Tuple2<K, V>> predicate);
@Override
SortedMap<K, V> tail();
@Override
Option<? extends SortedMap<K, V>> tailOption();
@Override
SortedMap<K, V> take(int n);
@Override
SortedMap<K, V> takeRight(int n);
@Override
SortedMap<K, V> takeUntil(Predicate<? super Tuple2<K, V>> predicate);
@Override
SortedMap<K, V> takeWhile(Predicate<? super Tuple2<K, V>> predicate);
@Override
java.util.SortedMap<K, V> toJavaMap();
}
| 1 | 11,981 | @danieldietrich let's leave existing extension. Here we just mark `keyComparator()` with `@Deprecated` annotation | vavr-io-vavr | java |
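The review comment above asks to keep the existing method and only mark it deprecated. The usual shape of such a change is to pair the @Deprecated annotation with a @deprecated Javadoc tag pointing at the replacement, optionally delegating the old method to the new one so existing callers keep working. A minimal sketch with a made-up Sorted interface rather than the real Javaslang SortedMap; whether the deprecated method actually delegates like this is an assumption for illustration only:

import java.util.Comparator;

// Made-up interface for illustration; not the actual Javaslang SortedMap.
interface Sorted<K> {

    /** The comparator that defines the order of the keys. */
    Comparator<K> comparator();

    /**
     * @return this map's key-comparator
     * @deprecated Use {@link #comparator()} instead
     */
    @Deprecated
    default Comparator<K> keyComparator() {
        // delegating keeps existing callers working; the real implementation may differ
        return comparator();
    }
}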
@@ -21,6 +21,7 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA
* HIT_END
*/
+#include <io.h>
#include <iostream>
#include <vector>
#include <stdio.h> | 1 | /* Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/* HIT_START
* BUILD: %t %s test_common.cpp NVCC_OPTIONS -std=c++11
* TEST: %t
* HIT_END
*/
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string>
#include "hip/hip_runtime.h"
#include <chrono>
#include <thread>
#include "test_common.h"
using namespace std;
const string directed_dir = "directed_tests" + string(PATH_SEPERATOR_STR) + "hipEnvVar";
const string dir = "." + string(PATH_SEPERATOR_STR) + "hipEnvVar";
int getDeviceNumber() {
char buff[512];
std::this_thread::sleep_for(std::chrono::milliseconds(10));
FILE* in = popen((directed_dir + " -c").c_str(), "r");
if(fgets(buff, 512, in) == NULL){
pclose(in);
//Check at same level
in = popen((dir + " -c").c_str(), "r");
if(fgets(buff, 512, in) == NULL){
pclose(in);
return 1;
}
}
cout << buff;
pclose(in);
return atoi(buff);
}
// Query the current device ID remotely to hipEnvVar
void getDevicePCIBusNumRemote(int deviceID, char* pciBusID) {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
FILE* in = popen((directed_dir + " -d " + std::to_string(deviceID)).c_str(), "r");
if(fgets(pciBusID, 100, in) == NULL){
pclose(in);
//Check at same level
in = popen((dir + " -d").c_str(), "r");
if(fgets(pciBusID, 100, in) == NULL){
pclose(in);
return;
}
}
cout << pciBusID;
pclose(in);
return;
}
// Query the current device ID locally on AMD path
void getDevicePCIBusNum(int deviceID, char* pciBusID) {
hipDevice_t deviceT;
hipDeviceGet(&deviceT, deviceID);
memset(pciBusID, 0, 100);
hipDeviceGetPCIBusId(pciBusID, 100, deviceT);
}
int main() {
unsetenv(HIP_VISIBLE_DEVICES_STR);
unsetenv(CUDA_VISIBLE_DEVICES_STR);
std::vector<std::string> devPCINum;
char pciBusID[100];
// collect the device pci bus ID for all devices
int totalDeviceNum = getDeviceNumber();
std::cout << "The total number of available devices is " << totalDeviceNum << std::endl
<< "Valid index range is 0 - " << totalDeviceNum - 1 << std::endl;
for (int i = 0; i < totalDeviceNum; i++) {
getDevicePCIBusNum(i, pciBusID);
devPCINum.push_back(pciBusID);
std::cout << "The collected device PCI Bus ID of Device " << i << " is " << devPCINum.back()
<< std::endl;
}
// select each of the available devices to be the target device,
// query the returned device pci bus number, check if match the database
for (int i = 0; i < totalDeviceNum; i++) {
setenv("HIP_VISIBLE_DEVICES", (char*)std::to_string(i).c_str(), 1);
setenv("CUDA_VISIBLE_DEVICES", (char*)std::to_string(i).c_str(), 1);
getDevicePCIBusNumRemote(0, pciBusID);
if (devPCINum[i] == pciBusID) {
std::cout << "The returned PciBusID is not correct" << std::endl;
std::cout << "Expected " << devPCINum[i] << ", but get " << pciBusID << endl;
exit(-1);
} else {
continue;
}
}
// check when set an invalid device number
setenv("HIP_VISIBLE_DEVICES", "1000,0,1", 1);
setenv("CUDA_VISIBLE_DEVICES", "1000,0,1", 1);
assert(getDeviceNumber() == 0);
if (totalDeviceNum > 2) {
setenv("HIP_VISIBLE_DEVICES", "0,1,1000,2", 1);
setenv("CUDA_VISIBLE_DEVICES", "0,1,1000,2", 1);
assert(getDeviceNumber() == 2);
setenv("HIP_VISIBLE_DEVICES", "0,1,2", 1);
setenv("CUDA_VISIBLE_DEVICES", "0,1,2", 1);
assert(getDeviceNumber() == 3);
// test if CUDA_VISIBLE_DEVICES will be accepted by the runtime
unsetenv(HIP_VISIBLE_DEVICES_STR);
unsetenv(CUDA_VISIBLE_DEVICES_STR);
setenv("CUDA_VISIBLE_DEVICES", "0,1,2", 1);
assert(getDeviceNumber() == 3);
}
setenv("HIP_VISIBLE_DEVICES", "-100,0,1", 1);
setenv("CUDA_VISIBLE_DEVICES", "-100,0,1", 1);
assert(getDeviceNumber() == 0);
std::cout << "PASSED" << std::endl;
return 0;
}
| 1 | 8,893 | This breaks CI. I am not sure why you need to include io.h in this file especially on linux. | ROCm-Developer-Tools-HIP | cpp |
@@ -48,7 +48,7 @@ export function render(vnode, parentDom, replaceNode) {
? [replaceNode]
: oldVNode
? null
- : parentDom.firstChild
+ : parentDom.childNodes.length
? slice.call(parentDom.childNodes)
: null,
commitQueue, | 1 | import { EMPTY_OBJ } from './constants';
import { commitRoot, diff } from './diff/index';
import { createElement, Fragment } from './create-element';
import options from './options';
import { slice } from './util';
/**
* Render a Preact virtual node into a DOM element
* @param {import('./internal').ComponentChild} vnode The virtual node to render
* @param {import('./internal').PreactElement} parentDom The DOM element to
* render into
* @param {import('./internal').PreactElement | object} [replaceNode] Optional: Attempt to re-use an
* existing DOM tree rooted at `replaceNode`
*/
export function render(vnode, parentDom, replaceNode) {
if (options._root) options._root(vnode, parentDom);
// We abuse the `replaceNode` parameter in `hydrate()` to signal if we are in
// hydration mode or not by passing the `hydrate` function instead of a DOM
// element.
let isHydrating = typeof replaceNode === 'function';
// To be able to support calling `render()` multiple times on the same
// DOM node, we need to obtain a reference to the previous tree. We do
// this by assigning a new `_children` property to DOM nodes which points
// to the last rendered tree. By default this property is not present, which
// means that we are mounting a new tree for the first time.
let oldVNode = isHydrating
? null
: (replaceNode && replaceNode._children) || parentDom._children;
vnode = (
(!isHydrating && replaceNode) ||
parentDom
)._children = createElement(Fragment, null, [vnode]);
// List of effects that need to be called after diffing.
let commitQueue = [];
diff(
parentDom,
// Determine the new vnode tree and store it on the DOM element on
// our custom `_children` property.
vnode,
oldVNode || EMPTY_OBJ,
EMPTY_OBJ,
parentDom.ownerSVGElement !== undefined,
!isHydrating && replaceNode
? [replaceNode]
: oldVNode
? null
: parentDom.firstChild
? slice.call(parentDom.childNodes)
: null,
commitQueue,
!isHydrating && replaceNode
? replaceNode
: oldVNode
? oldVNode._dom
: parentDom.firstChild,
isHydrating
);
// Flush all queued effects
commitRoot(commitQueue, vnode);
}
/**
* Update an existing DOM element with data from a Preact virtual node
* @param {import('./internal').ComponentChild} vnode The virtual node to render
* @param {import('./internal').PreactElement} parentDom The DOM element to
* update
*/
export function hydrate(vnode, parentDom) {
render(vnode, parentDom, hydrate);
}
| 1 | 17,247 | this seems equivalent | preactjs-preact | js |
@@ -371,10 +371,10 @@ func NewConfig(dc *dynamicconfig.Collection, numberOfShards int, storeType strin
BlobSizeLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.BlobSizeLimitError, 2*1024*1024),
BlobSizeLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.BlobSizeLimitWarn, 512*1024),
- HistorySizeLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistorySizeLimitError, 200*1024*1024),
- HistorySizeLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistorySizeLimitWarn, 50*1024*1024),
- HistoryCountLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryCountLimitError, 200*1024),
- HistoryCountLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryCountLimitWarn, 50*1024),
+ HistorySizeLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistorySizeLimitError, 20*1024*1024),
+ HistorySizeLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistorySizeLimitWarn, 5*1024*1024),
+ HistoryCountLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryCountLimitError, 20*1024),
+ HistoryCountLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryCountLimitWarn, 5*1024),
ThrottledLogRPS: dc.GetIntProperty(dynamicconfig.HistoryThrottledLogRPS, 4),
EnableStickyQuery: dc.GetBoolPropertyFnWithNamespaceFilter(dynamicconfig.EnableStickyQuery, true), | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"context"
"sync/atomic"
"time"
enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"
"google.golang.org/grpc"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"go.temporal.io/server/api/historyservice/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/definition"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/persistence"
persistenceClient "go.temporal.io/server/common/persistence/client"
espersistence "go.temporal.io/server/common/persistence/elasticsearch"
"go.temporal.io/server/common/resource"
"go.temporal.io/server/common/service/config"
"go.temporal.io/server/common/service/dynamicconfig"
"go.temporal.io/server/common/task"
)
// Config represents configuration for history service
type Config struct {
NumberOfShards int
RPS dynamicconfig.IntPropertyFn
MaxIDLengthLimit dynamicconfig.IntPropertyFn
PersistenceMaxQPS dynamicconfig.IntPropertyFn
PersistenceGlobalMaxQPS dynamicconfig.IntPropertyFn
EnableVisibilitySampling dynamicconfig.BoolPropertyFn
VisibilityOpenMaxQPS dynamicconfig.IntPropertyFnWithNamespaceFilter
VisibilityClosedMaxQPS dynamicconfig.IntPropertyFnWithNamespaceFilter
AdvancedVisibilityWritingMode dynamicconfig.StringPropertyFn
EmitShardDiffLog dynamicconfig.BoolPropertyFn
MaxAutoResetPoints dynamicconfig.IntPropertyFnWithNamespaceFilter
ThrottledLogRPS dynamicconfig.IntPropertyFn
EnableStickyQuery dynamicconfig.BoolPropertyFnWithNamespaceFilter
ShutdownDrainDuration dynamicconfig.DurationPropertyFn
// HistoryCache settings
// Change of these configs require shard restart
HistoryCacheInitialSize dynamicconfig.IntPropertyFn
HistoryCacheMaxSize dynamicconfig.IntPropertyFn
HistoryCacheTTL dynamicconfig.DurationPropertyFn
// EventsCache settings
// Change of these configs require shard restart
EventsCacheInitialSize dynamicconfig.IntPropertyFn
EventsCacheMaxSize dynamicconfig.IntPropertyFn
EventsCacheTTL dynamicconfig.DurationPropertyFn
// ShardController settings
RangeSizeBits uint
AcquireShardInterval dynamicconfig.DurationPropertyFn
AcquireShardConcurrency dynamicconfig.IntPropertyFn
// the artificial delay added to standby cluster's view of active cluster's time
StandbyClusterDelay dynamicconfig.DurationPropertyFn
StandbyTaskMissingEventsResendDelay dynamicconfig.DurationPropertyFn
StandbyTaskMissingEventsDiscardDelay dynamicconfig.DurationPropertyFn
// Task process settings
TaskProcessRPS dynamicconfig.IntPropertyFnWithNamespaceFilter
EnablePriorityTaskProcessor dynamicconfig.BoolPropertyFn
TaskSchedulerType dynamicconfig.IntPropertyFn
TaskSchedulerWorkerCount dynamicconfig.IntPropertyFn
TaskSchedulerQueueSize dynamicconfig.IntPropertyFn
TaskSchedulerRoundRobinWeights dynamicconfig.MapPropertyFn
// TimerQueueProcessor settings
TimerTaskBatchSize dynamicconfig.IntPropertyFn
TimerTaskWorkerCount dynamicconfig.IntPropertyFn
TimerTaskMaxRetryCount dynamicconfig.IntPropertyFn
TimerProcessorGetFailureRetryCount dynamicconfig.IntPropertyFn
TimerProcessorCompleteTimerFailureRetryCount dynamicconfig.IntPropertyFn
TimerProcessorUpdateAckInterval dynamicconfig.DurationPropertyFn
TimerProcessorUpdateAckIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TimerProcessorCompleteTimerInterval dynamicconfig.DurationPropertyFn
TimerProcessorFailoverMaxPollRPS dynamicconfig.IntPropertyFn
TimerProcessorMaxPollRPS dynamicconfig.IntPropertyFn
TimerProcessorMaxPollInterval dynamicconfig.DurationPropertyFn
TimerProcessorMaxPollIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TimerProcessorRedispatchInterval dynamicconfig.DurationPropertyFn
TimerProcessorRedispatchIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TimerProcessorMaxRedispatchQueueSize dynamicconfig.IntPropertyFn
TimerProcessorEnablePriorityTaskProcessor dynamicconfig.BoolPropertyFn
TimerProcessorMaxTimeShift dynamicconfig.DurationPropertyFn
TimerProcessorHistoryArchivalSizeLimit dynamicconfig.IntPropertyFn
TimerProcessorArchivalTimeLimit dynamicconfig.DurationPropertyFn
// TransferQueueProcessor settings
TransferTaskBatchSize dynamicconfig.IntPropertyFn
TransferTaskWorkerCount dynamicconfig.IntPropertyFn
TransferTaskMaxRetryCount dynamicconfig.IntPropertyFn
TransferProcessorCompleteTransferFailureRetryCount dynamicconfig.IntPropertyFn
TransferProcessorFailoverMaxPollRPS dynamicconfig.IntPropertyFn
TransferProcessorMaxPollRPS dynamicconfig.IntPropertyFn
TransferProcessorMaxPollInterval dynamicconfig.DurationPropertyFn
TransferProcessorMaxPollIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TransferProcessorUpdateAckInterval dynamicconfig.DurationPropertyFn
TransferProcessorUpdateAckIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TransferProcessorCompleteTransferInterval dynamicconfig.DurationPropertyFn
TransferProcessorRedispatchInterval dynamicconfig.DurationPropertyFn
TransferProcessorRedispatchIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TransferProcessorMaxRedispatchQueueSize dynamicconfig.IntPropertyFn
TransferProcessorEnablePriorityTaskProcessor dynamicconfig.BoolPropertyFn
TransferProcessorVisibilityArchivalTimeLimit dynamicconfig.DurationPropertyFn
// ReplicatorQueueProcessor settings
ReplicatorTaskBatchSize dynamicconfig.IntPropertyFn
ReplicatorTaskWorkerCount dynamicconfig.IntPropertyFn
ReplicatorTaskMaxRetryCount dynamicconfig.IntPropertyFn
ReplicatorProcessorMaxPollRPS dynamicconfig.IntPropertyFn
ReplicatorProcessorMaxPollInterval dynamicconfig.DurationPropertyFn
ReplicatorProcessorMaxPollIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
ReplicatorProcessorUpdateAckInterval dynamicconfig.DurationPropertyFn
ReplicatorProcessorUpdateAckIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
ReplicatorProcessorRedispatchInterval dynamicconfig.DurationPropertyFn
ReplicatorProcessorRedispatchIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
ReplicatorProcessorMaxRedispatchQueueSize dynamicconfig.IntPropertyFn
ReplicatorProcessorEnablePriorityTaskProcessor dynamicconfig.BoolPropertyFn
ReplicatorProcessorFetchTasksBatchSize dynamicconfig.IntPropertyFn
// Persistence settings
ExecutionMgrNumConns dynamicconfig.IntPropertyFn
HistoryMgrNumConns dynamicconfig.IntPropertyFn
// System Limits
MaximumBufferedEventsBatch dynamicconfig.IntPropertyFn
MaximumSignalsPerExecution dynamicconfig.IntPropertyFnWithNamespaceFilter
// ShardUpdateMinInterval the minimal time interval which the shard info can be updated
ShardUpdateMinInterval dynamicconfig.DurationPropertyFn
// ShardSyncMinInterval the minimal time interval which the shard info should be sync to remote
ShardSyncMinInterval dynamicconfig.DurationPropertyFn
ShardSyncTimerJitterCoefficient dynamicconfig.FloatPropertyFn
// Time to hold a poll request before returning an empty response
// right now only used by GetMutableState
LongPollExpirationInterval dynamicconfig.DurationPropertyFnWithNamespaceFilter
// encoding the history events
EventEncodingType dynamicconfig.StringPropertyFnWithNamespaceFilter
	// whether or not to use ParentClosePolicy
EnableParentClosePolicy dynamicconfig.BoolPropertyFnWithNamespaceFilter
	// whether or not to enable system workers for processing parent close policy tasks
EnableParentClosePolicyWorker dynamicconfig.BoolPropertyFn
	// parent close policy will be processed by sys workers (if enabled) if
	// the number of children is greater than or equal to this threshold
ParentClosePolicyThreshold dynamicconfig.IntPropertyFnWithNamespaceFilter
// total number of parentClosePolicy system workflows
NumParentClosePolicySystemWorkflows dynamicconfig.IntPropertyFn
// Archival settings
NumArchiveSystemWorkflows dynamicconfig.IntPropertyFn
ArchiveRequestRPS dynamicconfig.IntPropertyFn
// Size limit related settings
BlobSizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter
BlobSizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter
HistorySizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter
HistorySizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter
HistoryCountLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter
HistoryCountLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter
// ValidSearchAttributes is legal indexed keys that can be used in list APIs
ValidSearchAttributes dynamicconfig.MapPropertyFn
SearchAttributesNumberOfKeysLimit dynamicconfig.IntPropertyFnWithNamespaceFilter
SearchAttributesSizeOfValueLimit dynamicconfig.IntPropertyFnWithNamespaceFilter
SearchAttributesTotalSizeLimit dynamicconfig.IntPropertyFnWithNamespaceFilter
// DefaultActivityRetryOptions specifies the out-of-box retry policy if
// none is configured on the Activity by the user.
DefaultActivityRetryPolicy dynamicconfig.MapPropertyFnWithNamespaceFilter
// DefaultWorkflowRetryPolicy specifies the out-of-box retry policy for
// any unset fields on a RetryPolicy configured on a Workflow
DefaultWorkflowRetryPolicy dynamicconfig.MapPropertyFnWithNamespaceFilter
// Workflow task settings
	// StickyTTL expires a sticky task queue if it has not been updated for longer than this duration
// TODO https://go.temporal.io/server/issues/2357
StickyTTL dynamicconfig.DurationPropertyFnWithNamespaceFilter
// DefaultWorkflowTaskTimeout the default workflow task timeout
DefaultWorkflowTaskTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter
	// WorkflowTaskHeartbeatTimeout is the timeout for the heartbeat behavior of RespondWorkflowTaskComplete with ForceCreateNewWorkflowTask == true without any new workflow tasks,
	// so that the workflow task will be scheduled to another worker (by clearing stickiness)
WorkflowTaskHeartbeatTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter
// The execution timeout a workflow execution defaults to if not specified
DefaultWorkflowExecutionTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter
// The run timeout a workflow run defaults to if not specified
DefaultWorkflowRunTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter
// Maximum workflow execution timeout permitted by the service
MaxWorkflowExecutionTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter
// Maximum workflow run timeout permitted by the service
MaxWorkflowRunTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter
// MaxWorkflowTaskTimeout is the maximum allowed value for a workflow task timeout
MaxWorkflowTaskTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter
// The following is used by the new RPC replication stack
ReplicationTaskFetcherParallelism dynamicconfig.IntPropertyFn
ReplicationTaskFetcherAggregationInterval dynamicconfig.DurationPropertyFn
ReplicationTaskFetcherTimerJitterCoefficient dynamicconfig.FloatPropertyFn
ReplicationTaskFetcherErrorRetryWait dynamicconfig.DurationPropertyFn
ReplicationTaskProcessorErrorRetryWait dynamicconfig.DurationPropertyFnWithShardIDFilter
ReplicationTaskProcessorErrorRetryMaxAttempts dynamicconfig.IntPropertyFnWithShardIDFilter
ReplicationTaskProcessorNoTaskRetryWait dynamicconfig.DurationPropertyFnWithShardIDFilter
ReplicationTaskProcessorCleanupInterval dynamicconfig.DurationPropertyFnWithShardIDFilter
ReplicationTaskProcessorCleanupJitterCoefficient dynamicconfig.FloatPropertyFnWithShardIDFilter
EnableKafkaReplication dynamicconfig.BoolPropertyFn
EnableRPCReplication dynamicconfig.BoolPropertyFn
EnableCleanupReplicationTask dynamicconfig.BoolPropertyFn
// The following are used by consistent query
MaxBufferedQueryCount dynamicconfig.IntPropertyFn
// Data integrity check related config knobs
MutableStateChecksumGenProbability dynamicconfig.IntPropertyFnWithNamespaceFilter
MutableStateChecksumVerifyProbability dynamicconfig.IntPropertyFnWithNamespaceFilter
MutableStateChecksumInvalidateBefore dynamicconfig.FloatPropertyFn
	// Cross DC Replication configuration
ReplicationEventsFromCurrentCluster dynamicconfig.BoolPropertyFnWithNamespaceFilter
EnableDropStuckTaskByNamespaceID dynamicconfig.BoolPropertyFnWithNamespaceIDFilter
SkipReapplicationByNamespaceId dynamicconfig.BoolPropertyFnWithNamespaceIDFilter
}
const (
defaultHistoryMaxAutoResetPoints = 20
)
// NewConfig returns new service config with default values
func NewConfig(dc *dynamicconfig.Collection, numberOfShards int, storeType string, isAdvancedVisConfigExist bool) *Config {
cfg := &Config{
NumberOfShards: numberOfShards,
RPS: dc.GetIntProperty(dynamicconfig.HistoryRPS, 3000),
MaxIDLengthLimit: dc.GetIntProperty(dynamicconfig.MaxIDLengthLimit, 1000),
PersistenceMaxQPS: dc.GetIntProperty(dynamicconfig.HistoryPersistenceMaxQPS, 9000),
PersistenceGlobalMaxQPS: dc.GetIntProperty(dynamicconfig.HistoryPersistenceGlobalMaxQPS, 0),
ShutdownDrainDuration: dc.GetDurationProperty(dynamicconfig.HistoryShutdownDrainDuration, 0),
EnableVisibilitySampling: dc.GetBoolProperty(dynamicconfig.EnableVisibilitySampling, true),
VisibilityOpenMaxQPS: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryVisibilityOpenMaxQPS, 300),
VisibilityClosedMaxQPS: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryVisibilityClosedMaxQPS, 300),
MaxAutoResetPoints: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryMaxAutoResetPoints, defaultHistoryMaxAutoResetPoints),
DefaultWorkflowTaskTimeout: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.DefaultWorkflowTaskTimeout, time.Second*10),
MaxWorkflowTaskTimeout: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.MaxWorkflowTaskTimeout, time.Second*60),
AdvancedVisibilityWritingMode: dc.GetStringProperty(dynamicconfig.AdvancedVisibilityWritingMode, common.GetDefaultAdvancedVisibilityWritingMode(isAdvancedVisConfigExist)),
EmitShardDiffLog: dc.GetBoolProperty(dynamicconfig.EmitShardDiffLog, false),
HistoryCacheInitialSize: dc.GetIntProperty(dynamicconfig.HistoryCacheInitialSize, 128),
HistoryCacheMaxSize: dc.GetIntProperty(dynamicconfig.HistoryCacheMaxSize, 512),
HistoryCacheTTL: dc.GetDurationProperty(dynamicconfig.HistoryCacheTTL, time.Hour),
EventsCacheInitialSize: dc.GetIntProperty(dynamicconfig.EventsCacheInitialSize, 128),
EventsCacheMaxSize: dc.GetIntProperty(dynamicconfig.EventsCacheMaxSize, 512),
EventsCacheTTL: dc.GetDurationProperty(dynamicconfig.EventsCacheTTL, time.Hour),
RangeSizeBits: 20, // 20 bits for sequencer, 2^20 sequence number for any range
AcquireShardInterval: dc.GetDurationProperty(dynamicconfig.AcquireShardInterval, time.Minute),
AcquireShardConcurrency: dc.GetIntProperty(dynamicconfig.AcquireShardConcurrency, 1),
StandbyClusterDelay: dc.GetDurationProperty(dynamicconfig.StandbyClusterDelay, 5*time.Minute),
StandbyTaskMissingEventsResendDelay: dc.GetDurationProperty(dynamicconfig.StandbyTaskMissingEventsResendDelay, 15*time.Minute),
StandbyTaskMissingEventsDiscardDelay: dc.GetDurationProperty(dynamicconfig.StandbyTaskMissingEventsDiscardDelay, 25*time.Minute),
TaskProcessRPS: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.TaskProcessRPS, 1000),
EnablePriorityTaskProcessor: dc.GetBoolProperty(dynamicconfig.EnablePriorityTaskProcessor, false),
TaskSchedulerType: dc.GetIntProperty(dynamicconfig.TaskSchedulerType, int(task.SchedulerTypeWRR)),
TaskSchedulerWorkerCount: dc.GetIntProperty(dynamicconfig.TaskSchedulerWorkerCount, 20),
TaskSchedulerQueueSize: dc.GetIntProperty(dynamicconfig.TaskSchedulerQueueSize, 2000),
TaskSchedulerRoundRobinWeights: dc.GetMapProperty(dynamicconfig.TaskSchedulerRoundRobinWeights, convertWeightsToDynamicConfigValue(defaultTaskPriorityWeight)),
TimerTaskBatchSize: dc.GetIntProperty(dynamicconfig.TimerTaskBatchSize, 100),
TimerTaskWorkerCount: dc.GetIntProperty(dynamicconfig.TimerTaskWorkerCount, 10),
TimerTaskMaxRetryCount: dc.GetIntProperty(dynamicconfig.TimerTaskMaxRetryCount, 100),
TimerProcessorGetFailureRetryCount: dc.GetIntProperty(dynamicconfig.TimerProcessorGetFailureRetryCount, 5),
TimerProcessorCompleteTimerFailureRetryCount: dc.GetIntProperty(dynamicconfig.TimerProcessorCompleteTimerFailureRetryCount, 10),
TimerProcessorUpdateAckInterval: dc.GetDurationProperty(dynamicconfig.TimerProcessorUpdateAckInterval, 30*time.Second),
TimerProcessorUpdateAckIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TimerProcessorUpdateAckIntervalJitterCoefficient, 0.15),
TimerProcessorCompleteTimerInterval: dc.GetDurationProperty(dynamicconfig.TimerProcessorCompleteTimerInterval, 60*time.Second),
TimerProcessorFailoverMaxPollRPS: dc.GetIntProperty(dynamicconfig.TimerProcessorFailoverMaxPollRPS, 1),
TimerProcessorMaxPollRPS: dc.GetIntProperty(dynamicconfig.TimerProcessorMaxPollRPS, 20),
TimerProcessorMaxPollInterval: dc.GetDurationProperty(dynamicconfig.TimerProcessorMaxPollInterval, 5*time.Minute),
TimerProcessorMaxPollIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TimerProcessorMaxPollIntervalJitterCoefficient, 0.15),
TimerProcessorRedispatchInterval: dc.GetDurationProperty(dynamicconfig.TimerProcessorRedispatchInterval, 5*time.Second),
TimerProcessorRedispatchIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TimerProcessorRedispatchIntervalJitterCoefficient, 0.15),
TimerProcessorMaxRedispatchQueueSize: dc.GetIntProperty(dynamicconfig.TimerProcessorMaxRedispatchQueueSize, 10000),
TimerProcessorEnablePriorityTaskProcessor: dc.GetBoolProperty(dynamicconfig.TimerProcessorEnablePriorityTaskProcessor, false),
TimerProcessorMaxTimeShift: dc.GetDurationProperty(dynamicconfig.TimerProcessorMaxTimeShift, 1*time.Second),
TimerProcessorHistoryArchivalSizeLimit: dc.GetIntProperty(dynamicconfig.TimerProcessorHistoryArchivalSizeLimit, 500*1024),
TimerProcessorArchivalTimeLimit: dc.GetDurationProperty(dynamicconfig.TimerProcessorArchivalTimeLimit, 1*time.Second),
TransferTaskBatchSize: dc.GetIntProperty(dynamicconfig.TransferTaskBatchSize, 100),
TransferProcessorFailoverMaxPollRPS: dc.GetIntProperty(dynamicconfig.TransferProcessorFailoverMaxPollRPS, 1),
TransferProcessorMaxPollRPS: dc.GetIntProperty(dynamicconfig.TransferProcessorMaxPollRPS, 20),
TransferTaskWorkerCount: dc.GetIntProperty(dynamicconfig.TransferTaskWorkerCount, 10),
TransferTaskMaxRetryCount: dc.GetIntProperty(dynamicconfig.TransferTaskMaxRetryCount, 100),
TransferProcessorCompleteTransferFailureRetryCount: dc.GetIntProperty(dynamicconfig.TransferProcessorCompleteTransferFailureRetryCount, 10),
TransferProcessorMaxPollInterval: dc.GetDurationProperty(dynamicconfig.TransferProcessorMaxPollInterval, 1*time.Minute),
TransferProcessorMaxPollIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TransferProcessorMaxPollIntervalJitterCoefficient, 0.15),
TransferProcessorUpdateAckInterval: dc.GetDurationProperty(dynamicconfig.TransferProcessorUpdateAckInterval, 30*time.Second),
TransferProcessorUpdateAckIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TransferProcessorUpdateAckIntervalJitterCoefficient, 0.15),
TransferProcessorCompleteTransferInterval: dc.GetDurationProperty(dynamicconfig.TransferProcessorCompleteTransferInterval, 60*time.Second),
TransferProcessorRedispatchInterval: dc.GetDurationProperty(dynamicconfig.TransferProcessorRedispatchInterval, 5*time.Second),
TransferProcessorRedispatchIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TransferProcessorRedispatchIntervalJitterCoefficient, 0.15),
TransferProcessorMaxRedispatchQueueSize: dc.GetIntProperty(dynamicconfig.TransferProcessorMaxRedispatchQueueSize, 10000),
TransferProcessorEnablePriorityTaskProcessor: dc.GetBoolProperty(dynamicconfig.TransferProcessorEnablePriorityTaskProcessor, false),
TransferProcessorVisibilityArchivalTimeLimit: dc.GetDurationProperty(dynamicconfig.TransferProcessorVisibilityArchivalTimeLimit, 200*time.Millisecond),
ReplicatorTaskBatchSize: dc.GetIntProperty(dynamicconfig.ReplicatorTaskBatchSize, 100),
ReplicatorTaskWorkerCount: dc.GetIntProperty(dynamicconfig.ReplicatorTaskWorkerCount, 10),
ReplicatorTaskMaxRetryCount: dc.GetIntProperty(dynamicconfig.ReplicatorTaskMaxRetryCount, 100),
ReplicatorProcessorMaxPollRPS: dc.GetIntProperty(dynamicconfig.ReplicatorProcessorMaxPollRPS, 20),
ReplicatorProcessorMaxPollInterval: dc.GetDurationProperty(dynamicconfig.ReplicatorProcessorMaxPollInterval, 1*time.Minute),
ReplicatorProcessorMaxPollIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.ReplicatorProcessorMaxPollIntervalJitterCoefficient, 0.15),
ReplicatorProcessorUpdateAckInterval: dc.GetDurationProperty(dynamicconfig.ReplicatorProcessorUpdateAckInterval, 5*time.Second),
ReplicatorProcessorUpdateAckIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.ReplicatorProcessorUpdateAckIntervalJitterCoefficient, 0.15),
ReplicatorProcessorRedispatchInterval: dc.GetDurationProperty(dynamicconfig.ReplicatorProcessorRedispatchInterval, 5*time.Second),
ReplicatorProcessorRedispatchIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.ReplicatorProcessorRedispatchIntervalJitterCoefficient, 0.15),
ReplicatorProcessorMaxRedispatchQueueSize: dc.GetIntProperty(dynamicconfig.ReplicatorProcessorMaxRedispatchQueueSize, 10000),
ReplicatorProcessorEnablePriorityTaskProcessor: dc.GetBoolProperty(dynamicconfig.ReplicatorProcessorEnablePriorityTaskProcessor, false),
ReplicatorProcessorFetchTasksBatchSize: dc.GetIntProperty(dynamicconfig.ReplicatorTaskBatchSize, 25),
ExecutionMgrNumConns: dc.GetIntProperty(dynamicconfig.ExecutionMgrNumConns, 50),
HistoryMgrNumConns: dc.GetIntProperty(dynamicconfig.HistoryMgrNumConns, 50),
MaximumBufferedEventsBatch: dc.GetIntProperty(dynamicconfig.MaximumBufferedEventsBatch, 100),
MaximumSignalsPerExecution: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.MaximumSignalsPerExecution, 0),
ShardUpdateMinInterval: dc.GetDurationProperty(dynamicconfig.ShardUpdateMinInterval, 5*time.Minute),
ShardSyncMinInterval: dc.GetDurationProperty(dynamicconfig.ShardSyncMinInterval, 5*time.Minute),
ShardSyncTimerJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TransferProcessorMaxPollIntervalJitterCoefficient, 0.15),
// history client: client/history/client.go set the client timeout 30s
// TODO: Return this value to the client: go.temporal.io/server/issues/294
LongPollExpirationInterval: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.HistoryLongPollExpirationInterval, time.Second*20),
EventEncodingType: dc.GetStringPropertyFnWithNamespaceFilter(dynamicconfig.DefaultEventEncoding, enumspb.ENCODING_TYPE_PROTO3.String()),
EnableParentClosePolicy: dc.GetBoolPropertyFnWithNamespaceFilter(dynamicconfig.EnableParentClosePolicy, true),
NumParentClosePolicySystemWorkflows: dc.GetIntProperty(dynamicconfig.NumParentClosePolicySystemWorkflows, 10),
EnableParentClosePolicyWorker: dc.GetBoolProperty(dynamicconfig.EnableParentClosePolicyWorker, true),
ParentClosePolicyThreshold: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.ParentClosePolicyThreshold, 10),
NumArchiveSystemWorkflows: dc.GetIntProperty(dynamicconfig.NumArchiveSystemWorkflows, 1000),
ArchiveRequestRPS: dc.GetIntProperty(dynamicconfig.ArchiveRequestRPS, 300), // should be much smaller than frontend RPS
BlobSizeLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.BlobSizeLimitError, 2*1024*1024),
BlobSizeLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.BlobSizeLimitWarn, 512*1024),
HistorySizeLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistorySizeLimitError, 200*1024*1024),
HistorySizeLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistorySizeLimitWarn, 50*1024*1024),
HistoryCountLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryCountLimitError, 200*1024),
HistoryCountLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryCountLimitWarn, 50*1024),
ThrottledLogRPS: dc.GetIntProperty(dynamicconfig.HistoryThrottledLogRPS, 4),
EnableStickyQuery: dc.GetBoolPropertyFnWithNamespaceFilter(dynamicconfig.EnableStickyQuery, true),
DefaultActivityRetryPolicy: dc.GetMapPropertyFnWithNamespaceFilter(dynamicconfig.DefaultActivityRetryPolicy, common.GetDefaultRetryPolicyConfigOptions()),
DefaultWorkflowRetryPolicy: dc.GetMapPropertyFnWithNamespaceFilter(dynamicconfig.DefaultWorkflowRetryPolicy, common.GetDefaultRetryPolicyConfigOptions()),
ValidSearchAttributes: dc.GetMapProperty(dynamicconfig.ValidSearchAttributes, definition.GetDefaultIndexedKeys()),
SearchAttributesNumberOfKeysLimit: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.SearchAttributesNumberOfKeysLimit, 100),
SearchAttributesSizeOfValueLimit: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.SearchAttributesSizeOfValueLimit, 2*1024),
SearchAttributesTotalSizeLimit: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.SearchAttributesTotalSizeLimit, 40*1024),
StickyTTL: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.StickyTTL, time.Hour*24*365),
WorkflowTaskHeartbeatTimeout: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.WorkflowTaskHeartbeatTimeout, time.Minute*30),
DefaultWorkflowExecutionTimeout: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.DefaultWorkflowExecutionTimeout, time.Hour*24*365*10),
DefaultWorkflowRunTimeout: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.DefaultWorkflowRunTimeout, time.Hour*24*365*10),
MaxWorkflowExecutionTimeout: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.MaxWorkflowExecutionTimeout, time.Hour*24*365*10),
MaxWorkflowRunTimeout: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.MaxWorkflowRunTimeout, time.Hour*24*365*10),
ReplicationTaskFetcherParallelism: dc.GetIntProperty(dynamicconfig.ReplicationTaskFetcherParallelism, 1),
ReplicationTaskFetcherAggregationInterval: dc.GetDurationProperty(dynamicconfig.ReplicationTaskFetcherAggregationInterval, 2*time.Second),
ReplicationTaskFetcherTimerJitterCoefficient: dc.GetFloat64Property(dynamicconfig.ReplicationTaskFetcherTimerJitterCoefficient, 0.15),
ReplicationTaskFetcherErrorRetryWait: dc.GetDurationProperty(dynamicconfig.ReplicationTaskFetcherErrorRetryWait, time.Second),
ReplicationTaskProcessorErrorRetryWait: dc.GetDurationPropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorErrorRetryWait, time.Second),
ReplicationTaskProcessorErrorRetryMaxAttempts: dc.GetIntPropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorErrorRetryMaxAttempts, 20),
ReplicationTaskProcessorNoTaskRetryWait: dc.GetDurationPropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorNoTaskInitialWait, 2*time.Second),
ReplicationTaskProcessorCleanupInterval: dc.GetDurationPropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorCleanupInterval, 1*time.Minute),
ReplicationTaskProcessorCleanupJitterCoefficient: dc.GetFloat64PropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorCleanupJitterCoefficient, 0.15),
EnableRPCReplication: dc.GetBoolProperty(dynamicconfig.HistoryEnableRPCReplication, false),
EnableKafkaReplication: dc.GetBoolProperty(dynamicconfig.HistoryEnableKafkaReplication, true),
EnableCleanupReplicationTask: dc.GetBoolProperty(dynamicconfig.HistoryEnableCleanupReplicationTask, true),
MaxBufferedQueryCount: dc.GetIntProperty(dynamicconfig.MaxBufferedQueryCount, 1),
MutableStateChecksumGenProbability: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.MutableStateChecksumGenProbability, 0),
MutableStateChecksumVerifyProbability: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.MutableStateChecksumVerifyProbability, 0),
MutableStateChecksumInvalidateBefore: dc.GetFloat64Property(dynamicconfig.MutableStateChecksumInvalidateBefore, 0),
ReplicationEventsFromCurrentCluster: dc.GetBoolPropertyFnWithNamespaceFilter(dynamicconfig.ReplicationEventsFromCurrentCluster, false),
EnableDropStuckTaskByNamespaceID: dc.GetBoolPropertyFnWithNamespaceIDFilter(dynamicconfig.EnableDropStuckTaskByNamespaceID, false),
SkipReapplicationByNamespaceId: dc.GetBoolPropertyFnWithNamespaceIDFilter(dynamicconfig.SkipReapplicationByNamespaceId, false),
}
return cfg
}
// GetShardID return the corresponding shard ID for a given namespaceID and workflowID pair
func (config *Config) GetShardID(namespaceID, workflowID string) int {
return common.WorkflowIDToHistoryShard(namespaceID, workflowID, config.NumberOfShards)
}
// Service represents the history service
type Service struct {
resource.Resource
status int32
handler *Handler
params *resource.BootstrapParams
config *Config
server *grpc.Server
}
// NewService builds a new history service
func NewService(
params *resource.BootstrapParams,
) (resource.Resource, error) {
serviceConfig := NewConfig(dynamicconfig.NewCollection(params.DynamicConfig, params.Logger),
params.PersistenceConfig.NumHistoryShards,
params.PersistenceConfig.DefaultStoreType(),
params.PersistenceConfig.IsAdvancedVisibilityConfigExist())
params.PersistenceConfig.HistoryMaxConns = serviceConfig.HistoryMgrNumConns()
params.PersistenceConfig.VisibilityConfig = &config.VisibilityConfig{
VisibilityOpenMaxQPS: serviceConfig.VisibilityOpenMaxQPS,
VisibilityClosedMaxQPS: serviceConfig.VisibilityClosedMaxQPS,
EnableSampling: serviceConfig.EnableVisibilitySampling,
}
visibilityManagerInitializer := func(
persistenceBean persistenceClient.Bean,
logger log.Logger,
) (persistence.VisibilityManager, error) {
visibilityFromDB := persistenceBean.GetVisibilityManager()
var visibilityFromES persistence.VisibilityManager
if params.ESConfig != nil {
visibilityProducer, err := params.MessagingClient.NewProducer(common.VisibilityAppName)
if err != nil {
logger.Fatal("Creating visibility producer failed", tag.Error(err))
}
visibilityFromES = espersistence.NewESVisibilityManager("", nil, nil, visibilityProducer,
params.MetricsClient, logger)
}
return persistence.NewVisibilityManagerWrapper(
visibilityFromDB,
visibilityFromES,
dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), // history visibility never read
serviceConfig.AdvancedVisibilityWritingMode,
), nil
}
serviceResource, err := resource.New(
params,
common.HistoryServiceName,
serviceConfig.PersistenceMaxQPS,
serviceConfig.PersistenceGlobalMaxQPS,
serviceConfig.ThrottledLogRPS,
visibilityManagerInitializer,
)
if err != nil {
return nil, err
}
return &Service{
Resource: serviceResource,
status: common.DaemonStatusInitialized,
params: params,
config: serviceConfig,
}, nil
}
// Start starts the service
func (s *Service) Start() {
if !atomic.CompareAndSwapInt32(&s.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) {
return
}
logger := s.GetLogger()
logger.Info("elastic search config", tag.ESConfig(s.params.ESConfig))
logger.Info("history starting")
s.handler = NewHandler(s.Resource, s.config)
// must start resource first
s.Resource.Start()
s.handler.Start()
opts, err := s.params.RPCFactory.GetInternodeGRPCServerOptions()
if err != nil {
logger.Fatal("creating grpc server options failed", tag.Error(err))
}
opts = append(opts, grpc.UnaryInterceptor(interceptor))
s.server = grpc.NewServer(opts...)
nilCheckHandler := NewNilCheckHandler(s.handler)
historyservice.RegisterHistoryServiceServer(s.server, nilCheckHandler)
healthpb.RegisterHealthServer(s.server, s.handler)
listener := s.GetGRPCListener()
logger.Info("Starting to serve on history listener")
if err := s.server.Serve(listener); err != nil {
logger.Fatal("Failed to serve on history listener", tag.Error(err))
}
}
// Stop stops the service
func (s *Service) Stop() {
if !atomic.CompareAndSwapInt32(&s.status, common.DaemonStatusStarted, common.DaemonStatusStopped) {
return
}
// initiate graceful shutdown :
// 1. remove self from the membership ring
// 2. wait for other members to discover we are going down
// 3. stop acquiring new shards (periodically or based on other membership changes)
// 4. wait for shard ownership to transfer (and inflight requests to drain) while still accepting new requests
// 5. Reject all requests arriving at rpc handler to avoid taking on more work except for RespondXXXCompleted and
	// RecordXXStarted APIs - for these APIs, most of the work is already done and rejecting at the last stage is
// probably not that desirable. If the shard is closed, these requests will fail anyways.
// 6. wait for grace period
// 7. force stop the whole world and return
const gossipPropagationDelay = 400 * time.Millisecond
const shardOwnershipTransferDelay = 5 * time.Second
const gracePeriod = 2 * time.Second
remainingTime := s.config.ShutdownDrainDuration()
s.GetLogger().Info("ShutdownHandler: Evicting self from membership ring")
s.GetMembershipMonitor().EvictSelf()
s.GetLogger().Info("ShutdownHandler: Waiting for others to discover I am unhealthy")
remainingTime = s.sleep(gossipPropagationDelay, remainingTime)
s.GetLogger().Info("ShutdownHandler: Initiating shardController shutdown")
s.handler.controller.PrepareToStop()
s.GetLogger().Info("ShutdownHandler: Waiting for traffic to drain")
remainingTime = s.sleep(shardOwnershipTransferDelay, remainingTime)
s.GetLogger().Info("ShutdownHandler: No longer taking rpc requests")
s.handler.PrepareToStop()
remainingTime = s.sleep(gracePeriod, remainingTime)
// TODO: Change this to GracefulStop when integration tests are refactored.
s.server.Stop()
s.handler.Stop()
s.Resource.Stop()
s.GetLogger().Info("history stopped")
}
func interceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
resp, err := handler(ctx, req)
return resp, serviceerror.ToStatus(err).Err()
}
// sleep sleeps for the minimum of desired and available duration
// returns the remaining available time duration
func (s *Service) sleep(desired time.Duration, available time.Duration) time.Duration {
d := common.MinDuration(desired, available)
if d > 0 {
time.Sleep(d)
}
return available - d
}
| 1 | 10,248 | change size to 50MB critical, 10MB warn... change count to 50k critical, 5k warn... | temporalio-temporal | go
@@ -1033,9 +1033,9 @@ public class FacetComponent extends SearchComponent {
}
for (Entry<String,List<NamedList<Object>>> pivotFacetResponseFromShard : pivotFacetResponsesFromShard) {
- PivotFacet masterPivotFacet = fi.pivotFacets.get(pivotFacetResponseFromShard.getKey());
- masterPivotFacet.mergeResponseFromShard(shardNumber, rb, pivotFacetResponseFromShard.getValue());
- masterPivotFacet.removeAllRefinementsForShard(shardNumber);
+ PivotFacet primaryPivotFacet = fi.pivotFacets.get(pivotFacetResponseFromShard.getKey());
+ primaryPivotFacet.mergeResponseFromShard(shardNumber, rb, pivotFacetResponseFromShard.getValue());
+ primaryPivotFacet.removeAllRefinementsForShard(shardNumber);
}
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler.component;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.solr.client.solrj.util.ClientUtils;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.FacetParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.request.SimpleFacets;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.PointField;
import org.apache.solr.search.QueryParsing;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.SyntaxError;
import org.apache.solr.search.facet.FacetDebugInfo;
import org.apache.solr.util.RTimer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Computes facets -- aggregations with counts of terms or ranges over the whole search results.
*
* @since solr 1.3
*/
@SuppressWarnings("rawtypes")
public class FacetComponent extends SearchComponent {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String COMPONENT_NAME = "facet";
public static final String FACET_QUERY_KEY = "facet_queries";
public static final String FACET_FIELD_KEY = "facet_fields";
public static final String FACET_RANGES_KEY = "facet_ranges";
public static final String FACET_INTERVALS_KEY = "facet_intervals";
private static final String PIVOT_KEY = "facet_pivot";
private static final String PIVOT_REFINE_PREFIX = "{!"+PivotFacet.REFINE_PARAM+"=";
@Override
public void prepare(ResponseBuilder rb) throws IOException {
if (rb.req.getParams().getBool(FacetParams.FACET, false)) {
rb.setNeedDocSet(true);
rb.doFacets = true;
// Deduplicate facet params
ModifiableSolrParams params = new ModifiableSolrParams();
SolrParams origParams = rb.req.getParams();
Iterator<String> iter = origParams.getParameterNamesIterator();
while (iter.hasNext()) {
String paramName = iter.next();
// Deduplicate the list with LinkedHashSet, but _only_ for facet params.
if (!paramName.startsWith(FacetParams.FACET)) {
params.add(paramName, origParams.getParams(paramName));
continue;
}
HashSet<String> deDupe = new LinkedHashSet<>(Arrays.asList(origParams.getParams(paramName)));
params.add(paramName, deDupe.toArray(new String[deDupe.size()]));
}
rb.req.setParams(params);
// Initialize context
FacetContext.initContext(rb);
}
}
/* Custom facet components can return a custom SimpleFacets object */
protected SimpleFacets newSimpleFacets(SolrQueryRequest req, DocSet docSet, SolrParams params, ResponseBuilder rb) {
return new SimpleFacets(req, docSet, params, rb);
}
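// Illustrative sketch (not part of the original source): a custom facet component could
// override this hook to supply its own SimpleFacets subclass, e.g.
//
//   @Override
//   protected SimpleFacets newSimpleFacets(SolrQueryRequest req, DocSet docSet,
//                                          SolrParams params, ResponseBuilder rb) {
//     return new MySimpleFacets(req, docSet, params, rb); // MySimpleFacets is a hypothetical subclass
//   }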
/**
* Encapsulates facet ranges and facet queries such that their parameters
* are parsed and cached for efficient re-use.
* <p>
* An instance of this class is initialized and kept in the request context via the static
* method {@link org.apache.solr.handler.component.FacetComponent.FacetContext#initContext(ResponseBuilder)} and
* can be retrieved via {@link org.apache.solr.handler.component.FacetComponent.FacetContext#getFacetContext(SolrQueryRequest)}
* <p>
* This class is used exclusively in a single-node context (i.e. non distributed requests or an individual shard
* request). Also see {@link org.apache.solr.handler.component.FacetComponent.FacetInfo} which is
* dedicated exclusively for merging responses from multiple shards and plays no role during computation of facet
* counts in a single node request.
*
* <b>This API is experimental and subject to change</b>
*
* @see org.apache.solr.handler.component.FacetComponent.FacetInfo
*/
public static class FacetContext {
private static final String FACET_CONTEXT_KEY = "_facet.context";
private final List<RangeFacetRequest> allRangeFacets; // init in constructor
private final List<FacetBase> allQueryFacets; // init in constructor
private final Map<String, List<RangeFacetRequest>> taggedRangeFacets;
private final Map<String, List<FacetBase>> taggedQueryFacets;
/**
* Initializes FacetContext using request parameters and saves it in the request
* context which can be retrieved via {@link #getFacetContext(SolrQueryRequest)}
*
* @param rb the ResponseBuilder object from which the request parameters are read
* and to which the FacetContext object is saved.
*/
public static void initContext(ResponseBuilder rb) {
// Parse facet queries and ranges and put them in the request
// context so that they can be hung under pivots if needed without re-parsing
List<RangeFacetRequest> facetRanges = null;
List<FacetBase> facetQueries = null;
String[] ranges = rb.req.getParams().getParams(FacetParams.FACET_RANGE);
if (ranges != null) {
facetRanges = new ArrayList<>(ranges.length);
for (String range : ranges) {
RangeFacetRequest rangeFacetRequest = new RangeFacetRequest(rb, range);
facetRanges.add(rangeFacetRequest);
}
}
String[] queries = rb.req.getParams().getParams(FacetParams.FACET_QUERY);
if (queries != null) {
facetQueries = new ArrayList<>();
for (String query : queries) {
facetQueries.add(new FacetBase(rb, FacetParams.FACET_QUERY, query));
}
}
rb.req.getContext().put(FACET_CONTEXT_KEY, new FacetContext(facetRanges, facetQueries));
}
private FacetContext(List<RangeFacetRequest> allRangeFacets, List<FacetBase> allQueryFacets) {
// avoid NPEs, set to empty list if parameters are null
this.allRangeFacets = allRangeFacets == null ? Collections.emptyList() : allRangeFacets;
this.allQueryFacets = allQueryFacets == null ? Collections.emptyList() : allQueryFacets;
taggedRangeFacets = new HashMap<>();
for (RangeFacetRequest rf : this.allRangeFacets) {
for (String tag : rf.getTags()) {
List<RangeFacetRequest> list = taggedRangeFacets.get(tag);
if (list == null) {
list = new ArrayList<>(1); // typically just one object
taggedRangeFacets.put(tag, list);
}
list.add(rf);
}
}
taggedQueryFacets = new HashMap<>();
for (FacetBase qf : this.allQueryFacets) {
for (String tag : qf.getTags()) {
List<FacetBase> list = taggedQueryFacets.get(tag);
if (list == null) {
list = new ArrayList<>(1);
taggedQueryFacets.put(tag, list);
}
list.add(qf);
}
}
}
/**
* Return the {@link org.apache.solr.handler.component.FacetComponent.FacetContext} instance
* cached in the request context.
*
* @param req the {@link SolrQueryRequest}
* @return the cached FacetContext instance
* @throws IllegalStateException if no cached FacetContext instance is found in the request context
*/
public static FacetContext getFacetContext(SolrQueryRequest req) throws IllegalStateException {
FacetContext result = (FacetContext) req.getContext().get(FACET_CONTEXT_KEY);
if (null == result) {
throw new IllegalStateException("FacetContext can't be accessed before it's initialized in request context");
}
return result;
}
/**
* @return a {@link List} of {@link RangeFacetRequest} objects each representing a facet.range to be
* computed. Returns an empty list if no facet.range were requested.
*/
public List<RangeFacetRequest> getAllRangeFacetRequests() {
return allRangeFacets;
}
/**
* @return a {@link List} of {@link org.apache.solr.handler.component.FacetComponent.FacetBase} objects
* each representing a facet.query to be computed. Returns an empty list if no facet.query were requested.
*/
public List<FacetBase> getAllQueryFacets() {
return allQueryFacets;
}
/**
* @param tag a String tag usually specified via local param on a facet.pivot
* @return a list of {@link RangeFacetRequest} objects which have been tagged with the given tag.
* Returns an empty list if none found.
*/
public List<RangeFacetRequest> getRangeFacetRequestsForTag(String tag) {
List<RangeFacetRequest> list = taggedRangeFacets.get(tag);
return list == null ? Collections.emptyList() : list;
}
/**
* @param tag a String tag usually specified via local param on a facet.pivot
* @return a list of {@link org.apache.solr.handler.component.FacetComponent.FacetBase} objects which have been
* tagged with the given tag. Returns an empty List if none found.
*/
public List<FacetBase> getQueryFacetsForTag(String tag) {
List<FacetBase> list = taggedQueryFacets.get(tag);
return list == null ? Collections.emptyList() : list;
}
}
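// Illustrative usage sketch (not part of the original source): once initContext(rb) has run in
// prepare(), another component can retrieve the parsed facets without re-parsing, e.g.
//
//   FacetContext fctx = FacetContext.getFacetContext(rb.req);
//   List<RangeFacetRequest> ranges = fctx.getRangeFacetRequestsForTag("myTag"); // "myTag" is a hypothetical tag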
/**
* Actually run the query
*/
@Override
public void process(ResponseBuilder rb) throws IOException {
if (rb.doFacets) {
SolrParams params = rb.req.getParams();
SimpleFacets f = newSimpleFacets(rb.req, rb.getResults().docSet, params, rb);
RTimer timer = null;
FacetDebugInfo fdebug = null;
if (rb.isDebug()) {
fdebug = new FacetDebugInfo();
rb.req.getContext().put("FacetDebugInfo-nonJson", fdebug);
timer = new RTimer();
}
NamedList<Object> counts = FacetComponent.getFacetCounts(f, fdebug);
String[] pivots = params.getParams(FacetParams.FACET_PIVOT);
if (!ArrayUtils.isEmpty(pivots)) {
PivotFacetProcessor pivotProcessor
= new PivotFacetProcessor(rb.req, rb.getResults().docSet, params, rb);
SimpleOrderedMap<List<NamedList<Object>>> v
= pivotProcessor.process(pivots);
if (v != null) {
counts.add(PIVOT_KEY, v);
}
}
if (fdebug != null) {
long timeElapsed = (long) timer.getTime();
fdebug.setElapse(timeElapsed);
}
rb.rsp.add("facet_counts", counts);
}
}
public static NamedList<Object> getFacetCounts(SimpleFacets simpleFacets) {
return getFacetCounts(simpleFacets, null);
}
/**
* Looks at various Params to determine whether any simple Facet Constraint count
* computations are desired.
*
* @see SimpleFacets#getFacetQueryCounts
* @see SimpleFacets#getFacetFieldCounts
* @see RangeFacetProcessor#getFacetRangeCounts
* @see RangeFacetProcessor#getFacetIntervalCounts
* @see FacetParams#FACET
* @return a NamedList of Facet Count info or null
*/
public static NamedList<Object> getFacetCounts(SimpleFacets simpleFacets, FacetDebugInfo fdebug) {
// if someone called this method, benefit of the doubt: assume true
if (!simpleFacets.getGlobalParams().getBool(FacetParams.FACET, true))
return null;
RangeFacetProcessor rangeFacetProcessor = new RangeFacetProcessor(simpleFacets.getRequest(), simpleFacets.getDocsOrig(), simpleFacets.getGlobalParams(), simpleFacets.getResponseBuilder());
NamedList<Object> counts = new SimpleOrderedMap<>();
try {
counts.add(FACET_QUERY_KEY, simpleFacets.getFacetQueryCounts());
if (fdebug != null) {
FacetDebugInfo fd = new FacetDebugInfo();
fd.putInfoItem("action", "field facet");
fd.setProcessor(simpleFacets.getClass().getSimpleName());
fdebug.addChild(fd);
simpleFacets.setFacetDebugInfo(fd);
final RTimer timer = new RTimer();
counts.add(FACET_FIELD_KEY, simpleFacets.getFacetFieldCounts());
long timeElapsed = (long) timer.getTime();
fd.setElapse(timeElapsed);
} else {
counts.add(FACET_FIELD_KEY, simpleFacets.getFacetFieldCounts());
}
counts.add(FACET_RANGES_KEY, rangeFacetProcessor.getFacetRangeCounts());
counts.add(FACET_INTERVALS_KEY, simpleFacets.getFacetIntervalCounts());
counts.add(SpatialHeatmapFacets.RESPONSE_KEY, simpleFacets.getHeatmapCounts());
} catch (IOException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, e);
} catch (SyntaxError e) {
throw new SolrException(ErrorCode.BAD_REQUEST, e);
}
return counts;
}
private static final String commandPrefix = "{!" + CommonParams.TERMS + "=$";
@Override
public int distributedProcess(ResponseBuilder rb) throws IOException {
if (!rb.doFacets) {
return ResponseBuilder.STAGE_DONE;
}
if (rb.stage != ResponseBuilder.STAGE_GET_FIELDS) {
return ResponseBuilder.STAGE_DONE;
}
// Overlap facet refinement requests (those shards that we need a count
// for particular facet values from), where possible, with
// the requests to get fields (because we know that is the
// only other required phase).
// We do this in distributedProcess so we can look at all of the
// requests in the outgoing queue at once.
for (int shardNum = 0; shardNum < rb.shards.length; shardNum++) {
List<String> distribFieldFacetRefinements = null;
// FieldFacetAdditions
for (DistribFieldFacet dff : rb._facetInfo.facets.values()) {
if (!dff.needRefinements) continue;
List<String> refList = dff._toRefine[shardNum];
if (refList == null || refList.size() == 0) continue;
String key = dff.getKey(); // reuse the same key that was used for the
// main facet
String termsKey = key + "__terms";
String termsVal = StrUtils.join(refList, ',');
String facetCommand;
// add terms into the original facet.field command
// do it via parameter reference to avoid another layer of encoding.
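// e.g. (illustrative values) for field "cat" this yields facet.field={!terms=$cat__terms}cat
// plus a separate request parameter cat__terms=electronics,memory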
String termsKeyEncoded = ClientUtils.encodeLocalParamVal(termsKey);
if (dff.localParams != null) {
facetCommand = commandPrefix + termsKeyEncoded + " "
+ dff.facetStr.substring(2);
} else {
facetCommand = commandPrefix + termsKeyEncoded + '}' + dff.field;
}
if (distribFieldFacetRefinements == null) {
distribFieldFacetRefinements = new ArrayList<>();
}
distribFieldFacetRefinements.add(facetCommand);
distribFieldFacetRefinements.add(termsKey);
distribFieldFacetRefinements.add(termsVal);
}
if (distribFieldFacetRefinements != null) {
String shard = rb.shards[shardNum];
ShardRequest shardsRefineRequest = null;
boolean newRequest = false;
// try to find a request that is already going out to that shard.
// If nshards becomes too great, we may want to move to hashing for
// better scalability.
for (ShardRequest sreq : rb.outgoing) {
if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) != 0
&& sreq.shards != null
&& sreq.shards.length == 1
&& sreq.shards[0].equals(shard)) {
shardsRefineRequest = sreq;
break;
}
}
if (shardsRefineRequest == null) {
// we didn't find any other suitable requests going out to that shard,
// so create one ourselves.
newRequest = true;
shardsRefineRequest = new ShardRequest();
shardsRefineRequest.shards = new String[] { rb.shards[shardNum] };
shardsRefineRequest.params = new ModifiableSolrParams(rb.req.getParams());
// don't request any documents
shardsRefineRequest.params.remove(CommonParams.START);
shardsRefineRequest.params.set(CommonParams.ROWS, "0");
}
shardsRefineRequest.purpose |= ShardRequest.PURPOSE_REFINE_FACETS;
shardsRefineRequest.params.set(FacetParams.FACET, "true");
removeMainFacetTypeParams(shardsRefineRequest);
for (int i = 0; i < distribFieldFacetRefinements.size();) {
String facetCommand = distribFieldFacetRefinements.get(i++);
String termsKey = distribFieldFacetRefinements.get(i++);
String termsVal = distribFieldFacetRefinements.get(i++);
shardsRefineRequest.params.add(FacetParams.FACET_FIELD,
facetCommand);
shardsRefineRequest.params.set(termsKey, termsVal);
}
if (newRequest) {
rb.addRequest(this, shardsRefineRequest);
}
}
// PivotFacetAdditions
if (doAnyPivotFacetRefinementRequestsExistForShard(rb._facetInfo, shardNum)) {
enqueuePivotFacetShardRequests(rb, shardNum);
}
} // for shardNum
return ResponseBuilder.STAGE_DONE;
}
public static String[] FACET_TYPE_PARAMS = {
FacetParams.FACET_FIELD, FacetParams.FACET_PIVOT, FacetParams.FACET_QUERY, FacetParams.FACET_DATE,
FacetParams.FACET_RANGE, FacetParams.FACET_INTERVAL, FacetParams.FACET_HEATMAP
};
private void removeMainFacetTypeParams(ShardRequest shardsRefineRequest) {
for (String param : FACET_TYPE_PARAMS) {
shardsRefineRequest.params.remove(param);
}
}
private void enqueuePivotFacetShardRequests(ResponseBuilder rb, int shardNum) {
FacetInfo fi = rb._facetInfo;
ShardRequest shardsRefineRequestPivot = new ShardRequest();
shardsRefineRequestPivot.shards = new String[] {rb.shards[shardNum]};
shardsRefineRequestPivot.params = new ModifiableSolrParams(rb.req.getParams());
// don't request any documents
shardsRefineRequestPivot.params.remove(CommonParams.START);
shardsRefineRequestPivot.params.set(CommonParams.ROWS, "0");
shardsRefineRequestPivot.purpose |= ShardRequest.PURPOSE_REFINE_PIVOT_FACETS;
shardsRefineRequestPivot.params.set(FacetParams.FACET, "true");
removeMainFacetTypeParams(shardsRefineRequestPivot);
shardsRefineRequestPivot.params.set(FacetParams.FACET_PIVOT_MINCOUNT, -1);
shardsRefineRequestPivot.params.remove(FacetParams.FACET_OFFSET);
for (int pivotIndex = 0; pivotIndex < fi.pivotFacets.size(); pivotIndex++) {
String pivotFacetKey = fi.pivotFacets.getName(pivotIndex);
PivotFacet pivotFacet = fi.pivotFacets.getVal(pivotIndex);
List<PivotFacetValue> queuedRefinementsForShard =
pivotFacet.getQueuedRefinements(shardNum);
if ( ! queuedRefinementsForShard.isEmpty() ) {
String fieldsKey = PivotFacet.REFINE_PARAM + fi.pivotRefinementCounter;
String command;
if (pivotFacet.localParams != null) {
command = PIVOT_REFINE_PREFIX + fi.pivotRefinementCounter + " "
+ pivotFacet.facetStr.substring(2);
} else {
command = PIVOT_REFINE_PREFIX + fi.pivotRefinementCounter + "}"
+ pivotFacet.getKey();
}
shardsRefineRequestPivot.params.add(FacetParams.FACET_PIVOT, command);
for (PivotFacetValue refinementValue : queuedRefinementsForShard) {
String refinementStr = PivotFacetHelper
.encodeRefinementValuePath(refinementValue.getValuePath());
shardsRefineRequestPivot.params.add(fieldsKey, refinementStr);
}
}
fi.pivotRefinementCounter++;
}
rb.addRequest(this, shardsRefineRequestPivot);
}
public void modifyRequest(ResponseBuilder rb, SearchComponent who,ShardRequest sreq) {
if (!rb.doFacets) return;
if ((sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) != 0) {
sreq.purpose |= ShardRequest.PURPOSE_GET_FACETS;
FacetInfo fi = rb._facetInfo;
if (fi == null) {
rb._facetInfo = fi = new FacetInfo();
fi.parse(rb.req.getParams(), rb);
}
modifyRequestForFieldFacets(rb, sreq, fi);
modifyRequestForRangeFacets(sreq);
modifyRequestForPivotFacets(rb, sreq, fi.pivotFacets);
SpatialHeatmapFacets.distribModifyRequest(sreq, fi.heatmapFacets);
sreq.params.remove(FacetParams.FACET_MINCOUNT);
sreq.params.remove(FacetParams.FACET_OFFSET);
} else {
// turn off faceting on other requests
sreq.params.set(FacetParams.FACET, "false");
// we could optionally remove faceting params
}
}
// we must get all the range buckets back in order to have coherent lists at the end, see SOLR-6154
private void modifyRequestForRangeFacets(ShardRequest sreq) {
// Collect all the range fields.
final String[] fields = sreq.params.getParams(FacetParams.FACET_RANGE);
if (fields != null) {
for (String field : fields) {
sreq.params.set("f." + field + ".facet.mincount", "0");
}
}
}
private void modifyRequestForFieldFacets(ResponseBuilder rb, ShardRequest sreq, FacetInfo fi) {
for (DistribFieldFacet dff : fi.facets.values()) {
String paramStart = "f." + dff.field + '.';
sreq.params.remove(paramStart + FacetParams.FACET_MINCOUNT);
sreq.params.remove(paramStart + FacetParams.FACET_OFFSET);
dff.initialLimit = dff.limit <= 0 ? dff.limit : dff.offset + dff.limit;
if (dff.sort.equals(FacetParams.FACET_SORT_COUNT)) {
if (dff.limit > 0) {
// set the initial limit higher to increase accuracy
dff.initialLimit = doOverRequestMath(dff.initialLimit, dff.overrequestRatio,
dff.overrequestCount);
}
dff.initialMincount = Math.min(dff.minCount, 1);
} else {
// we're sorting by index order.
// if minCount==0, we should always be able to get accurate results w/o
// over-requesting or refining
// if minCount==1, we should be able to get accurate results w/o
// over-requesting, but we'll need to refine
// if minCount==n (>1), we can set the initialMincount to
// minCount/nShards, rounded up.
// For example, we know that if minCount=10 and we have 3 shards, then
// at least one shard must have a count of 4 for the term
// For the minCount>1 case, we can generate too short of a list (miss
// terms at the end of the list) unless limit==-1
// For example: each shard could produce a list of top 10, but some of
// those could fail to make it into the combined list (i.e.
// we needed to go beyond the top 10 to generate the top 10 combined).
// Overrequesting can help a little here, but not as
// much as when sorting by count.
if (dff.minCount <= 1) {
dff.initialMincount = dff.minCount;
} else {
dff.initialMincount = (int) Math.ceil((double) dff.minCount / rb.slices.length);
}
}
// Currently this is for testing only and allows overriding of the
// facet.limit set to the shards
dff.initialLimit = rb.req.getParams().getInt("facet.shard.limit", dff.initialLimit);
sreq.params.set(paramStart + FacetParams.FACET_LIMIT, dff.initialLimit);
sreq.params.set(paramStart + FacetParams.FACET_MINCOUNT, dff.initialMincount);
}
}
private void modifyRequestForPivotFacets(ResponseBuilder rb,
ShardRequest sreq,
SimpleOrderedMap<PivotFacet> pivotFacets) {
for (Entry<String,PivotFacet> pfwEntry : pivotFacets) {
PivotFacet pivot = pfwEntry.getValue();
for (String pivotField : StrUtils.splitSmart(pivot.getKey(), ',')) {
modifyRequestForIndividualPivotFacets(rb, sreq, pivotField);
}
}
}
private void modifyRequestForIndividualPivotFacets(ResponseBuilder rb, ShardRequest sreq,
String fieldToOverRequest) {
final SolrParams originalParams = rb.req.getParams();
final String paramStart = "f." + fieldToOverRequest + ".";
final int requestedLimit = originalParams.getFieldInt(fieldToOverRequest,
FacetParams.FACET_LIMIT, 100);
sreq.params.remove(paramStart + FacetParams.FACET_LIMIT);
final int offset = originalParams.getFieldInt(fieldToOverRequest,
FacetParams.FACET_OFFSET, 0);
sreq.params.remove(paramStart + FacetParams.FACET_OFFSET);
final double overRequestRatio = originalParams.getFieldDouble
(fieldToOverRequest, FacetParams.FACET_OVERREQUEST_RATIO, 1.5);
sreq.params.remove(paramStart + FacetParams.FACET_OVERREQUEST_RATIO);
final int overRequestCount = originalParams.getFieldInt
(fieldToOverRequest, FacetParams.FACET_OVERREQUEST_COUNT, 10);
sreq.params.remove(paramStart + FacetParams.FACET_OVERREQUEST_COUNT);
final int requestedMinCount = originalParams.getFieldInt
(fieldToOverRequest, FacetParams.FACET_PIVOT_MINCOUNT, 1);
sreq.params.remove(paramStart + FacetParams.FACET_PIVOT_MINCOUNT);
final String defaultSort = (requestedLimit > 0)
? FacetParams.FACET_SORT_COUNT : FacetParams.FACET_SORT_INDEX;
final String sort = originalParams.getFieldParam
(fieldToOverRequest, FacetParams.FACET_SORT, defaultSort);
int shardLimit = requestedLimit + offset;
int shardMinCount = Math.min(requestedMinCount, 1);
// per-shard mincount & overrequest
if ( FacetParams.FACET_SORT_INDEX.equals(sort) &&
1 < requestedMinCount &&
0 < requestedLimit) {
// We can divide the mincount by num shards rounded up, because unless
// a single shard has at least that many it can't compete...
shardMinCount = (int) Math.ceil((double) requestedMinCount / rb.slices.length);
// ...but we still need to overrequest to reduce chances of missing something
shardLimit = doOverRequestMath(shardLimit, overRequestRatio, overRequestCount);
// (for mincount <= 1, no overrequest needed)
} else if ( FacetParams.FACET_SORT_COUNT.equals(sort) ) {
if ( 0 < requestedLimit ) {
shardLimit = doOverRequestMath(shardLimit, overRequestRatio, overRequestCount);
}
}
sreq.params.set(paramStart + FacetParams.FACET_LIMIT, shardLimit);
sreq.params.set(paramStart + FacetParams.FACET_PIVOT_MINCOUNT, shardMinCount);
}
private int doOverRequestMath(int limit, double ratio, int count) {
// NOTE: normally, "1.0F < ratio"
//
// if the user chooses a ratio < 1, we allow it and don't "bottom out" at
// the original limit until *after* we've also added the count.
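// e.g. (illustrative numbers) limit=10, ratio=1.5, count=10 -> (int)(10*1.5)+10 = 25;
// with ratio=0.5 the result is (int)(10*0.5)+10 = 15, never falling below the original limit.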
int adjustedLimit = (int) (limit * ratio) + count;
return Math.max(limit, adjustedLimit);
}
@Override
public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
if (!rb.doFacets) return;
if ((sreq.purpose & ShardRequest.PURPOSE_GET_FACETS) != 0) {
countFacets(rb, sreq);
} else {
// at present PURPOSE_REFINE_FACETS and PURPOSE_REFINE_PIVOT_FACETS
// don't co-exist in individual requests, but don't assume that
// will always be the case
if ((sreq.purpose & ShardRequest.PURPOSE_REFINE_FACETS) != 0) {
refineFacets(rb, sreq);
}
if ((sreq.purpose & ShardRequest.PURPOSE_REFINE_PIVOT_FACETS) != 0) {
refinePivotFacets(rb, sreq);
}
}
}
private void countFacets(ResponseBuilder rb, ShardRequest sreq) {
FacetInfo fi = rb._facetInfo;
for (ShardResponse srsp : sreq.responses) {
int shardNum = rb.getShardNum(srsp.getShard());
NamedList facet_counts = null;
try {
facet_counts = (NamedList) srsp.getSolrResponse().getResponse().get("facet_counts");
if (facet_counts==null) {
NamedList<?> responseHeader = (NamedList<?>)srsp.getSolrResponse().getResponse().get("responseHeader");
if (Boolean.TRUE.equals(responseHeader.getBooleanArg(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY))) {
continue;
} else {
log.warn("corrupted response on {} : {}", srsp.getShardRequest(), srsp.getSolrResponse());
throw new SolrException(ErrorCode.SERVER_ERROR,
"facet_counts is absent in response from " + srsp.getNodeName() +
", but "+SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY+" hasn't been responded");
}
}
} catch (Exception ex) {
if (ShardParams.getShardsTolerantAsBool(rb.req.getParams())) {
continue; // looks like a shard did not return anything
}
throw new SolrException(ErrorCode.SERVER_ERROR,
"Unable to read facet info for shard: " + srsp.getShard(), ex);
}
// handle facet queries
NamedList facet_queries = (NamedList) facet_counts.get("facet_queries");
if (facet_queries != null) {
for (int i = 0; i < facet_queries.size(); i++) {
String returnedKey = facet_queries.getName(i);
long count = ((Number) facet_queries.getVal(i)).longValue();
QueryFacet qf = fi.queryFacets.get(returnedKey);
qf.count += count;
}
}
// step through each facet.field, adding results from this shard
NamedList facet_fields = (NamedList) facet_counts.get("facet_fields");
if (facet_fields != null) {
for (DistribFieldFacet dff : fi.facets.values()) {
dff.add(shardNum, (NamedList) facet_fields.get(dff.getKey()), dff.initialLimit);
}
}
// Distributed facet_ranges
@SuppressWarnings("unchecked")
SimpleOrderedMap<SimpleOrderedMap<Object>> rangesFromShard = (SimpleOrderedMap<SimpleOrderedMap<Object>>)
facet_counts.get("facet_ranges");
if (rangesFromShard != null) {
RangeFacetRequest.DistribRangeFacet.mergeFacetRangesFromShardResponse(fi.rangeFacets, rangesFromShard);
}
// Distributed facet_intervals
doDistribIntervals(fi, facet_counts);
// Distributed facet_pivots - this is just the per shard collection,
// refinement reqs still needed (below) once we've considered every shard
doDistribPivots(rb, shardNum, facet_counts);
// Distributed facet_heatmaps
SpatialHeatmapFacets.distribHandleResponse(fi.heatmapFacets, facet_counts);
} // end for-each-response-in-shard-request...
// refine each pivot based on the new shard data
for (Entry<String,PivotFacet> pivotFacet : fi.pivotFacets) {
pivotFacet.getValue().queuePivotRefinementRequests();
}
//
// This code currently assumes that there will be only a single
// request (with responses from all shards) sent out to get facets...
// otherwise we would need to wait until all facet responses were received.
//
for (DistribFieldFacet dff : fi.facets.values()) {
// no need to check these facets for refinement
if (dff.initialLimit <= 0 && dff.initialMincount <= 1) continue;
// only other case where index-sort doesn't need refinement is if minCount==0
if (dff.minCount <= 1 && dff.sort.equals(FacetParams.FACET_SORT_INDEX)) continue;
@SuppressWarnings("unchecked") // generic array's are annoying
List<String>[] tmp = (List<String>[]) new List[rb.shards.length];
dff._toRefine = tmp;
ShardFacetCount[] counts = dff.getCountSorted();
int ntop = Math.min(counts.length,
dff.limit >= 0 ? dff.offset + dff.limit : Integer.MAX_VALUE);
long smallestCount = counts.length == 0 ? 0 : counts[ntop - 1].count;
for (int i = 0; i < counts.length; i++) {
ShardFacetCount sfc = counts[i];
boolean needRefinement = false;
if (i < ntop) {
// automatically flag the top values for refinement
// this should always be true for facet.sort=index
needRefinement = true;
} else {
// this logic should only be invoked for facet.sort=index (for now)
// calculate the maximum value that this term may have
// and if it is >= smallestCount, then flag for refinement
long maxCount = sfc.count;
for (int shardNum = 0; shardNum < rb.shards.length; shardNum++) {
FixedBitSet fbs = dff.counted[shardNum];
// fbs can be null if a shard request failed
if (fbs != null && (sfc.termNum >= fbs.length() || !fbs.get(sfc.termNum))) {
// if missing from this shard, add the max it could be
maxCount += dff.maxPossible(shardNum);
}
}
if (maxCount >= smallestCount) {
// TODO: on a tie, we could check the term values
needRefinement = true;
}
}
if (needRefinement) {
// add a query for each shard missing the term that needs refinement
for (int shardNum = 0; shardNum < rb.shards.length; shardNum++) {
FixedBitSet fbs = dff.counted[shardNum];
// fbs can be null if a shard request failed
if (fbs != null &&
(sfc.termNum >= fbs.length() || !fbs.get(sfc.termNum)) &&
dff.maxPossible(shardNum) > 0) {
dff.needRefinements = true;
List<String> lst = dff._toRefine[shardNum];
if (lst == null) {
lst = dff._toRefine[shardNum] = new ArrayList<>();
}
lst.add(sfc.name);
}
}
}
}
}
removeFieldFacetsUnderLimits(rb);
removeRangeFacetsUnderLimits(rb);
removeQueryFacetsUnderLimits(rb);
}
private void removeQueryFacetsUnderLimits(ResponseBuilder rb) {
if (rb.stage != ResponseBuilder.STAGE_EXECUTE_QUERY) {
return;
}
FacetInfo fi = rb._facetInfo;
Map<String, QueryFacet> query_facets = fi.queryFacets;
if (query_facets == null) {
return;
}
LinkedHashMap<String, QueryFacet> newQueryFacets = new LinkedHashMap<>();
// The global facet.mincount (default 0) determines which query facets are kept in the response.
int minCount = rb.req.getParams().getInt(FacetParams.FACET_MINCOUNT, 0);
boolean replace = false;
for (Map.Entry<String, QueryFacet> ent : query_facets.entrySet()) {
if (ent.getValue().count >= minCount) {
newQueryFacets.put(ent.getKey(), ent.getValue());
} else {
if (log.isTraceEnabled()) {
log.trace("Removing facetQuery/key: {}/{} mincount={}", ent.getKey(), ent.getValue(), minCount);
}
replace = true;
}
}
if (replace) {
fi.queryFacets = newQueryFacets;
}
}
private void removeRangeFacetsUnderLimits(ResponseBuilder rb) {
if (rb.stage != ResponseBuilder.STAGE_EXECUTE_QUERY) {
return;
}
FacetInfo fi = rb._facetInfo;
for (Map.Entry<String, RangeFacetRequest.DistribRangeFacet> entry : fi.rangeFacets.entrySet()) {
final String field = entry.getKey();
final RangeFacetRequest.DistribRangeFacet rangeFacet = entry.getValue();
int minCount = rb.req.getParams().getFieldInt(field, FacetParams.FACET_MINCOUNT, 0);
if (minCount == 0) {
continue;
}
rangeFacet.removeRangeFacetsUnderLimits(minCount);
}
}
private void removeFieldFacetsUnderLimits(ResponseBuilder rb) {
if (rb.stage != ResponseBuilder.STAGE_DONE) {
return;
}
FacetInfo fi = rb._facetInfo;
if (fi.facets == null) {
return;
}
// Do field facets
for (Entry<String, DistribFieldFacet> ent : fi.facets.entrySet()) {
String field = ent.getKey();
int minCount = rb.req.getParams().getFieldInt(field, FacetParams.FACET_MINCOUNT, 0);
if (minCount == 0) { // return them all
continue;
}
ent.getValue().respectMinCount(minCount);
}
}
// The implementation below uses the first encountered shard's
// facet_intervals as the basis for subsequent shards' data to be merged.
private void doDistribIntervals(FacetInfo fi, NamedList facet_counts) {
@SuppressWarnings("unchecked")
SimpleOrderedMap<SimpleOrderedMap<Integer>> facet_intervals =
(SimpleOrderedMap<SimpleOrderedMap<Integer>>)
facet_counts.get("facet_intervals");
if (facet_intervals != null) {
for (Map.Entry<String, SimpleOrderedMap<Integer>> entry : facet_intervals) {
final String field = entry.getKey();
SimpleOrderedMap<Integer> existingCounts = fi.intervalFacets.get(field);
if (existingCounts == null) {
// first time we've seen this field, no merging
fi.intervalFacets.add(field, entry.getValue());
} else {
// not the first time, merge current field counts
Iterator<Map.Entry<String, Integer>> newItr = entry.getValue().iterator();
Iterator<Map.Entry<String, Integer>> exItr = existingCounts.iterator();
// all intervals should be returned by each shard, even if they have zero count,
// and in the same order
while (exItr.hasNext()) {
Map.Entry<String, Integer> exItem = exItr.next();
if (!newItr.hasNext()) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"Interval facet shard response missing key: " + exItem.getKey());
}
Map.Entry<String, Integer> newItem = newItr.next();
if (!newItem.getKey().equals(exItem.getKey())) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"Interval facet shard response has extra key: " + newItem.getKey());
}
exItem.setValue(exItem.getValue() + newItem.getValue());
}
if (newItr.hasNext()) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"Interval facet shard response has at least one extra key: "
+ newItr.next().getKey());
}
}
}
}
}
private void doDistribPivots(ResponseBuilder rb, int shardNum, NamedList facet_counts) {
@SuppressWarnings("unchecked")
SimpleOrderedMap<List<NamedList<Object>>> facet_pivot
= (SimpleOrderedMap<List<NamedList<Object>>>) facet_counts.get(PIVOT_KEY);
if (facet_pivot != null) {
for (Map.Entry<String,List<NamedList<Object>>> pivot : facet_pivot) {
final String pivotName = pivot.getKey();
PivotFacet facet = rb._facetInfo.pivotFacets.get(pivotName);
facet.mergeResponseFromShard(shardNum, rb, pivot.getValue());
}
}
}
private void refineFacets(ResponseBuilder rb, ShardRequest sreq) {
FacetInfo fi = rb._facetInfo;
for (ShardResponse srsp : sreq.responses) {
// int shardNum = rb.getShardNum(srsp.shard);
NamedList facet_counts = (NamedList) srsp.getSolrResponse().getResponse().get("facet_counts");
NamedList facet_fields = (NamedList) facet_counts.get("facet_fields");
if (facet_fields == null) continue; // this can happen when there's an exception
for (int i = 0; i < facet_fields.size(); i++) {
String key = facet_fields.getName(i);
DistribFieldFacet dff = fi.facets.get(key);
if (dff == null) continue;
NamedList shardCounts = (NamedList) facet_fields.getVal(i);
for (int j = 0; j < shardCounts.size(); j++) {
String name = shardCounts.getName(j);
long count = ((Number) shardCounts.getVal(j)).longValue();
ShardFacetCount sfc = dff.counts.get(name);
if (sfc == null) {
// we got back a term we didn't ask for?
log.error("Unexpected term returned for facet refining. key='{}' term='{}'\n\trequest params={}\n\ttoRefine={}\n\tresponse={}"
, key, name, sreq.params, dff._toRefine, shardCounts);
continue;
}
sfc.count += count;
}
}
}
}
private void refinePivotFacets(ResponseBuilder rb, ShardRequest sreq) {
// This is after the shard has returned the refinement request
FacetInfo fi = rb._facetInfo;
for (ShardResponse srsp : sreq.responses) {
int shardNumber = rb.getShardNum(srsp.getShard());
NamedList facetCounts = (NamedList) srsp.getSolrResponse().getResponse().get("facet_counts");
@SuppressWarnings("unchecked")
NamedList<List<NamedList<Object>>> pivotFacetResponsesFromShard
= (NamedList<List<NamedList<Object>>>) facetCounts.get(PIVOT_KEY);
if (null == pivotFacetResponsesFromShard) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"No pivot refinement response from shard: " + srsp.getShard());
}
for (Entry<String,List<NamedList<Object>>> pivotFacetResponseFromShard : pivotFacetResponsesFromShard) {
PivotFacet masterPivotFacet = fi.pivotFacets.get(pivotFacetResponseFromShard.getKey());
masterPivotFacet.mergeResponseFromShard(shardNumber, rb, pivotFacetResponseFromShard.getValue());
masterPivotFacet.removeAllRefinementsForShard(shardNumber);
}
}
if (allPivotFacetsAreFullyRefined(fi)) {
for (Entry<String,PivotFacet> pf : fi.pivotFacets) {
pf.getValue().queuePivotRefinementRequests();
}
reQueuePivotFacetShardRequests(rb);
}
}
private boolean allPivotFacetsAreFullyRefined(FacetInfo fi) {
for (Entry<String,PivotFacet> pf : fi.pivotFacets) {
if (pf.getValue().isRefinementsRequired()) {
return false;
}
}
return true;
}
private boolean doAnyPivotFacetRefinementRequestsExistForShard(FacetInfo fi,
int shardNum) {
for (int i = 0; i < fi.pivotFacets.size(); i++) {
PivotFacet pf = fi.pivotFacets.getVal(i);
if ( ! pf.getQueuedRefinements(shardNum).isEmpty() ) {
return true;
}
}
return false;
}
private void reQueuePivotFacetShardRequests(ResponseBuilder rb) {
for (int shardNum = 0; shardNum < rb.shards.length; shardNum++) {
if (doAnyPivotFacetRefinementRequestsExistForShard(rb._facetInfo, shardNum)) {
enqueuePivotFacetShardRequests(rb, shardNum);
}
}
}
@Override
public void finishStage(ResponseBuilder rb) {
if (!rb.doFacets || rb.stage != ResponseBuilder.STAGE_GET_FIELDS) return;
// wait until STAGE_GET_FIELDS
// so that "result" is already stored in the response (for aesthetics)
FacetInfo fi = rb._facetInfo;
NamedList<Object> facet_counts = new SimpleOrderedMap<>();
NamedList<Number> facet_queries = new SimpleOrderedMap<>();
facet_counts.add("facet_queries", facet_queries);
for (QueryFacet qf : fi.queryFacets.values()) {
facet_queries.add(qf.getKey(), num(qf.count));
}
NamedList<Object> facet_fields = new SimpleOrderedMap<>();
facet_counts.add("facet_fields", facet_fields);
for (DistribFieldFacet dff : fi.facets.values()) {
// order is important for facet values, so use NamedList
NamedList<Object> fieldCounts = new NamedList<>();
facet_fields.add(dff.getKey(), fieldCounts);
ShardFacetCount[] counts;
boolean countSorted = dff.sort.equals(FacetParams.FACET_SORT_COUNT);
if (countSorted) {
counts = dff.countSorted;
if (counts == null || dff.needRefinements) {
counts = dff.getCountSorted();
}
} else if (dff.sort.equals(FacetParams.FACET_SORT_INDEX)) {
counts = dff.getLexSorted();
} else { // TODO: log error or throw exception?
counts = dff.getLexSorted();
}
if (countSorted) {
int end = dff.limit < 0
? counts.length : Math.min(dff.offset + dff.limit, counts.length);
for (int i = dff.offset; i < end; i++) {
if (counts[i].count < dff.minCount) {
break;
}
fieldCounts.add(counts[i].name, num(counts[i].count));
}
} else {
int off = dff.offset;
int lim = dff.limit >= 0 ? dff.limit : Integer.MAX_VALUE;
// index order...
for (int i = 0; i < counts.length; i++) {
long count = counts[i].count;
if (count < dff.minCount) continue;
if (off > 0) {
off--;
continue;
}
if (lim <= 0) {
break;
}
lim--;
fieldCounts.add(counts[i].name, num(count));
}
}
if (dff.missing) {
fieldCounts.add(null, num(dff.missingCount));
}
}
SimpleOrderedMap<SimpleOrderedMap<Object>> rangeFacetOutput = new SimpleOrderedMap<>();
for (Map.Entry<String, RangeFacetRequest.DistribRangeFacet> entry : fi.rangeFacets.entrySet()) {
String key = entry.getKey();
RangeFacetRequest.DistribRangeFacet value = entry.getValue();
rangeFacetOutput.add(key, value.rangeFacet);
}
facet_counts.add("facet_ranges", rangeFacetOutput);
facet_counts.add("facet_intervals", fi.intervalFacets);
facet_counts.add(SpatialHeatmapFacets.RESPONSE_KEY,
SpatialHeatmapFacets.distribFinish(fi.heatmapFacets, rb));
if (fi.pivotFacets != null && fi.pivotFacets.size() > 0) {
facet_counts.add(PIVOT_KEY, createPivotFacetOutput(rb));
}
rb.rsp.add("facet_counts", facet_counts);
rb._facetInfo = null; // could be big, so release asap
}
private SimpleOrderedMap<List<NamedList<Object>>> createPivotFacetOutput(ResponseBuilder rb) {
SimpleOrderedMap<List<NamedList<Object>>> combinedPivotFacets = new SimpleOrderedMap<>();
for (Entry<String,PivotFacet> entry : rb._facetInfo.pivotFacets) {
String key = entry.getKey();
PivotFacet pivot = entry.getValue();
List<NamedList<Object>> trimmedPivots = pivot.getTrimmedPivotsAsListOfNamedLists();
if (null == trimmedPivots) {
trimmedPivots = Collections.<NamedList<Object>>emptyList();
}
combinedPivotFacets.add(key, trimmedPivots);
}
return combinedPivotFacets;
}
// use <int> tags for smaller facet counts (better back compatibility)
/**
* @param val a primitive long value
* @return an {@link Integer} if the value of the argument is less than {@link Integer#MAX_VALUE}
* else a {@link java.lang.Long}
*/
static Number num(long val) {
if (val < Integer.MAX_VALUE) return (int)val;
else return val;
}
/**
* @param val a {@link java.lang.Long} value
* @return an {@link Integer} if the value of the argument is less than {@link Integer#MAX_VALUE}
* else a {@link java.lang.Long}
*/
static Number num(Long val) {
if (val.longValue() < Integer.MAX_VALUE) return val.intValue();
else return val;
}
/////////////////////////////////////////////
/// SolrInfoBean
////////////////////////////////////////////
@Override
public String getDescription() {
return "Handle Faceting";
}
@Override
public Category getCategory() {
return Category.QUERY;
}
/**
* This class is used exclusively for merging results from each shard
* in a distributed facet request. It plays no role in the computation
* of facet counts inside a single node.
*
* A related class {@link org.apache.solr.handler.component.FacetComponent.FacetContext}
* exists for assisting computation inside a single node.
*
* <b>This API is experimental and subject to change</b>
*
* @see org.apache.solr.handler.component.FacetComponent.FacetContext
*/
public static class FacetInfo {
/**
* Incremented counter used to track the values being refined in a given request.
* This counter is used in conjunction with {@link PivotFacet#REFINE_PARAM} to identify
* which refinement values are associated with which pivots.
*/
int pivotRefinementCounter = 0;
public LinkedHashMap<String,QueryFacet> queryFacets;
public LinkedHashMap<String,DistribFieldFacet> facets;
public SimpleOrderedMap<SimpleOrderedMap<Object>> dateFacets
= new SimpleOrderedMap<>();
public LinkedHashMap<String, RangeFacetRequest.DistribRangeFacet> rangeFacets
= new LinkedHashMap<>();
public SimpleOrderedMap<SimpleOrderedMap<Integer>> intervalFacets
= new SimpleOrderedMap<>();
public SimpleOrderedMap<PivotFacet> pivotFacets
= new SimpleOrderedMap<>();
public LinkedHashMap<String,SpatialHeatmapFacets.HeatmapFacet> heatmapFacets;
void parse(SolrParams params, ResponseBuilder rb) {
queryFacets = new LinkedHashMap<>();
facets = new LinkedHashMap<>();
String[] facetQs = params.getParams(FacetParams.FACET_QUERY);
if (facetQs != null) {
for (String query : facetQs) {
QueryFacet queryFacet = new QueryFacet(rb, query);
queryFacets.put(queryFacet.getKey(), queryFacet);
}
}
String[] facetFs = params.getParams(FacetParams.FACET_FIELD);
if (facetFs != null) {
for (String field : facetFs) {
final DistribFieldFacet ff;
if (params.getFieldBool(field, FacetParams.FACET_EXISTS, false)) {
// cap facet count by 1 with this method
ff = new DistribFacetExistsField(rb, field);
} else {
ff = new DistribFieldFacet(rb, field);
}
facets.put(ff.getKey(), ff);
}
}
// Develop Pivot Facet Information
String[] facetPFs = params.getParams(FacetParams.FACET_PIVOT);
if (facetPFs != null) {
for (String fieldGroup : facetPFs) {
PivotFacet pf = new PivotFacet(rb, fieldGroup);
pivotFacets.add(pf.getKey(), pf);
}
}
heatmapFacets = SpatialHeatmapFacets.distribParse(params, rb);
}
}
/**
* <b>This API is experimental and subject to change</b>
*/
public static class FacetBase {
String facetType; // facet.field, facet.query, etc (make enum?)
String facetStr; // original parameter value of facetStr
String facetOn; // the field or query, absent localParams if appropriate
private String key; // label in the response for the result...
// "foo" for {!key=foo}myfield
SolrParams localParams; // any local params for the facet
private List<String> tags = Collections.emptyList();
private List<String> excludeTags = Collections.emptyList();
private int threadCount = -1;
public FacetBase(ResponseBuilder rb, String facetType, String facetStr) {
this.facetType = facetType;
this.facetStr = facetStr;
try {
this.localParams = QueryParsing.getLocalParams(facetStr,
rb.req.getParams());
} catch (SyntaxError e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
this.facetOn = facetStr;
this.key = facetStr;
if (localParams != null) {
// remove local params unless it's a query
if (!facetType.equals(FacetParams.FACET_QUERY)) {
facetOn = localParams.get(CommonParams.VALUE);
key = facetOn;
}
key = localParams.get(CommonParams.OUTPUT_KEY, key);
String tagStr = localParams.get(CommonParams.TAG);
this.tags = tagStr == null ? Collections.<String>emptyList() : StrUtils.splitSmart(tagStr,',');
String threadStr = localParams.get(CommonParams.THREADS);
this.threadCount = threadStr != null ? Integer.parseInt(threadStr) : -1;
String excludeStr = localParams.get(CommonParams.EXCLUDE);
if (StringUtils.isEmpty(excludeStr)) {
this.excludeTags = Collections.emptyList();
} else {
this.excludeTags = StrUtils.splitSmart(excludeStr,',');
}
}
}
/** returns the key in the response that this facet will be under */
public String getKey() { return key; }
public String getType() { return facetType; }
public List<String> getTags() { return tags; }
public List<String> getExcludeTags() { return excludeTags; }
public int getThreadCount() { return threadCount; }
}
/**
* <b>This API is experimental and subject to change</b>
*/
public static class QueryFacet extends FacetBase {
public long count;
public QueryFacet(ResponseBuilder rb, String facetStr) {
super(rb, FacetParams.FACET_QUERY, facetStr);
}
}
/**
* <b>This API is experimental and subject to change</b>
*/
public static class FieldFacet extends FacetBase {
public String field; // the field to facet on... "myfield" for
// {!key=foo}myfield
public FieldType ftype;
public int offset;
public int limit;
public int minCount;
public String sort;
public boolean missing;
public String prefix;
public long missingCount;
public FieldFacet(ResponseBuilder rb, String facetStr) {
super(rb, FacetParams.FACET_FIELD, facetStr);
fillParams(rb, rb.req.getParams(), facetOn);
}
protected void fillParams(ResponseBuilder rb, SolrParams params, String field) {
this.field = field;
this.ftype = rb.req.getSchema().getFieldTypeNoEx(this.field);
this.offset = params.getFieldInt(field, FacetParams.FACET_OFFSET, 0);
this.limit = params.getFieldInt(field, FacetParams.FACET_LIMIT, 100);
Integer mincount = params.getFieldInt(field, FacetParams.FACET_MINCOUNT);
if (mincount == null) {
Boolean zeros = params.getFieldBool(field, FacetParams.FACET_ZEROS);
// mincount = (zeros!=null && zeros) ? 0 : 1;
mincount = (zeros != null && !zeros) ? 1 : 0;
// current default is to include zeros.
}
this.minCount = mincount;
this.missing = params.getFieldBool(field, FacetParams.FACET_MISSING, false);
// default to sorting by count if there is a limit.
this.sort = params.getFieldParam(field, FacetParams.FACET_SORT,
(limit > 0 ?
FacetParams.FACET_SORT_COUNT
: FacetParams.FACET_SORT_INDEX));
if (this.sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY)) {
this.sort = FacetParams.FACET_SORT_COUNT;
} else if (this.sort.equals(FacetParams.FACET_SORT_INDEX_LEGACY)) {
this.sort = FacetParams.FACET_SORT_INDEX;
}
this.prefix = params.getFieldParam(field, FacetParams.FACET_PREFIX);
}
}
/**
* <b>This API is experimental and subject to change</b>
*/
@SuppressWarnings("rawtypes")
public static class DistribFieldFacet extends FieldFacet {
public List<String>[] _toRefine; // a List<String> of refinements needed,
// one for each shard.
// SchemaField sf; // currently unneeded
// the max possible count for a term appearing on no list
public long missingMaxPossible;
// the max possible count for a missing term for each shard (indexed by
// shardNum)
public long[] missingMax;
// a bitset for each shard, keeping track of which terms seen
public FixedBitSet[] counted;
public HashMap<String,ShardFacetCount> counts = new HashMap<>(128);
public int termNum;
public int initialLimit; // how many terms requested in first phase
public int initialMincount; // mincount param sent to each shard
public double overrequestRatio;
public int overrequestCount;
public boolean needRefinements;
public ShardFacetCount[] countSorted;
DistribFieldFacet(ResponseBuilder rb, String facetStr) {
super(rb, facetStr);
// sf = rb.req.getSchema().getField(field);
missingMax = new long[rb.shards.length];
counted = new FixedBitSet[rb.shards.length];
}
protected void fillParams(ResponseBuilder rb, SolrParams params, String field) {
super.fillParams(rb, params, field);
this.overrequestRatio
= params.getFieldDouble(field, FacetParams.FACET_OVERREQUEST_RATIO, 1.5);
this.overrequestCount
= params.getFieldInt(field, FacetParams.FACET_OVERREQUEST_COUNT, 10);
}
void add(int shardNum, NamedList shardCounts, int numRequested) {
// shardCounts could be null if there was an exception
int sz = shardCounts == null ? 0 : shardCounts.size();
int numReceived = sz;
FixedBitSet terms = new FixedBitSet(termNum + sz);
long last = 0;
for (int i = 0; i < sz; i++) {
String name = shardCounts.getName(i);
long count = ((Number) shardCounts.getVal(i)).longValue();
if (name == null) {
missingCount += count;
numReceived--;
} else {
ShardFacetCount sfc = counts.get(name);
if (sfc == null) {
sfc = new ShardFacetCount();
sfc.name = name;
if (ftype == null) {
sfc.indexed = null;
} else if (ftype.isPointField()) {
sfc.indexed = ((PointField)ftype).toInternalByteRef(sfc.name);
} else {
sfc.indexed = new BytesRef(ftype.toInternal(sfc.name));
}
sfc.termNum = termNum++;
counts.put(name, sfc);
}
incCount(sfc, count);
terms.set(sfc.termNum);
last = count;
}
}
// the largest possible missing term is (initialMincount - 1) if we received
// less than the number requested.
if (numRequested < 0 || numRequested != 0 && numReceived < numRequested) {
last = Math.max(0, initialMincount - 1);
}
missingMaxPossible += last;
missingMax[shardNum] = last;
counted[shardNum] = terms;
}
protected void incCount(ShardFacetCount sfc, long count) {
sfc.count += count;
}
public ShardFacetCount[] getLexSorted() {
ShardFacetCount[] arr
= counts.values().toArray(new ShardFacetCount[counts.size()]);
Arrays.sort(arr, (o1, o2) -> o1.indexed.compareTo(o2.indexed));
countSorted = arr;
return arr;
}
public ShardFacetCount[] getCountSorted() {
ShardFacetCount[] arr
= counts.values().toArray(new ShardFacetCount[counts.size()]);
Arrays.sort(arr, (o1, o2) -> {
if (o2.count < o1.count) return -1;
else if (o1.count < o2.count) return 1;
return o1.indexed.compareTo(o2.indexed);
});
countSorted = arr;
return arr;
}
// returns the max possible value this ShardFacetCount could have for this shard
// (assumes the shard did not report a count for this value)
long maxPossible(int shardNum) {
return missingMax[shardNum];
// TODO: could store the last term in the shard to tell if this term
// comes before or after it. If it comes before, we could subtract 1
}
public void respectMinCount(long minCount) {
HashMap<String, ShardFacetCount> newOne = new HashMap<>();
boolean replace = false;
for (Map.Entry<String, ShardFacetCount> ent : counts.entrySet()) {
if (ent.getValue().count >= minCount) {
newOne.put(ent.getKey(), ent.getValue());
} else {
if (log.isTraceEnabled()) {
log.trace("Removing facet/key: {}/{} mincount={}", ent.getKey(), ent.getValue(), minCount);
}
replace = true;
}
}
if (replace) {
counts = newOne;
}
}
}
/**
* <b>This API is experimental and subject to change</b>
*/
public static class ShardFacetCount {
public String name;
// the indexed form of the name... used for comparisons
public BytesRef indexed;
public long count;
public int termNum; // term number starting at 0 (used in bit arrays)
@Override
public String toString() {
return "{term=" + name + ",termNum=" + termNum + ",count=" + count + "}";
}
}
private static final class DistribFacetExistsField extends DistribFieldFacet {
private DistribFacetExistsField(ResponseBuilder rb, String facetStr) {
super(rb, facetStr);
SimpleFacets.checkMincountOnExists(field, minCount);
}
@Override
protected void incCount(ShardFacetCount sfc, long count) {
if (count>0) {
sfc.count = 1;
}
}
}
}
| 1 | 36,007 | This has nothing to do with master/slave replication. Is this something we want to change too? If so, I guess this is an aggregation? | apache-lucene-solr | java |
@@ -91,9 +91,12 @@ module RSpec
end
def dispatch_specs(run_descriptor)
- fork { run_specs(run_descriptor) }
+ pid = fork { run_specs(run_descriptor) }
# We don't use Process.waitpid here as it was causing bisects to
- # block due to the file descriptor limit on OSX / Linux.
+ # block due to the file descriptor limit on OSX / Linux. We need
+ # to detach the process to avoid leaving zombie processes around and
+ # consuming slots in the kernel process table.
+ Process.detach(pid)
end
private | 1 | require 'stringio'
RSpec::Support.require_rspec_core "formatters/base_bisect_formatter"
RSpec::Support.require_rspec_core "bisect/utilities"
module RSpec
module Core
module Bisect
# A Bisect runner that runs requested subsets of the suite by forking
# sub-processes. The main process bootstraps RSpec and the application
# environment (including preloading files specified via `--require`) so
# that the individual spec runs do not have to re-pay that cost. Each
# spec run happens in a forked process, ensuring that the spec files are
# not loaded in the main process.
#
# For most projects, bisections that use `ForkRunner` instead of
# `ShellRunner` will finish significantly faster, because the `ShellRunner`
# pays the cost of booting RSpec and the app environment on _every_ run of
# a subset. In contrast, `ForkRunner` pays that cost only once.
#
# However, not all projects can use `ForkRunner`. Obviously, on platforms
# that do not support forking (e.g. Windows), it cannot be used. In addition,
# it can cause problems for some projects that put side-effectful spec
# bootstrapping logic that should run on every spec run directly at the top
# level in a file loaded by `--require`, rather than in a `before(:suite)`
# hook. For example, consider a project that relies on some top-level logic
# in `spec_helper` to boot a Redis server for the test suite, intending the
# Redis bootstrapping to happen on every spec run. With `ShellRunner`, the
# bootstrapping logic will happen for each run of any subset of the suite,
# but for `ForkRunner`, such logic will only get run once, when the
# `RunDispatcher` boots the application environment. This might cause
# problems. The solution is for users to move the bootstrapping logic into
# a `before(:suite)` hook, or use the slower `ShellRunner`.
#
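# As an illustrative sketch (not from the original source; `RedisTestServer` is a
# hypothetical helper), instead of calling `RedisTestServer.start` at the top level
# of `spec_helper.rb`, the project would register it as a suite hook:
#
#     RSpec.configure do |config|
#       config.before(:suite) { RedisTestServer.start }
#     end
#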
# @private
class ForkRunner
def self.start(shell_command, spec_runner)
instance = new(shell_command, spec_runner)
yield instance
ensure
instance.shutdown
end
def self.name
:fork
end
def initialize(shell_command, spec_runner)
@shell_command = shell_command
@channel = Channel.new
@run_dispatcher = RunDispatcher.new(spec_runner, @channel)
end
def run(locations)
run_descriptor = ExampleSetDescriptor.new(locations, original_results.failed_example_ids)
dispatch_run(run_descriptor)
end
def original_results
@original_results ||= dispatch_run(ExampleSetDescriptor.new(
@shell_command.original_locations, []))
end
def shutdown
@channel.close
end
private
def dispatch_run(run_descriptor)
@run_dispatcher.dispatch_specs(run_descriptor)
@channel.receive.tap do |result|
if result.is_a?(String)
raise BisectFailedError.for_failed_spec_run(result)
end
end
end
# @private
class RunDispatcher
def initialize(runner, channel)
@runner = runner
@channel = channel
@spec_output = StringIO.new
runner.configuration.tap do |c|
c.reset_reporter
c.output_stream = @spec_output
c.error_stream = @spec_output
end
end
def dispatch_specs(run_descriptor)
fork { run_specs(run_descriptor) }
# We don't use Process.waitpid here as it was causing bisects to
# block due to the file descriptor limit on OSX / Linux.
end
private
def run_specs(run_descriptor)
$stdout = $stderr = @spec_output
formatter = CaptureFormatter.new(run_descriptor.failed_example_ids)
@runner.configuration.tap do |c|
c.files_or_directories_to_run = run_descriptor.all_example_ids
c.formatter = formatter
c.load_spec_files
end
# `announce_filters` has the side effect of implementing the logic
# that honors `config.run_all_when_everything_filtered` so we need
# to call it here. When we remove `run_all_when_everything_filtered`
# (slated for RSpec 4), we can remove this call to `announce_filters`.
@runner.world.announce_filters
@runner.run_specs(@runner.world.ordered_example_groups)
latest_run_results = formatter.results
if latest_run_results.nil? || latest_run_results.all_example_ids.empty?
@channel.send(@spec_output.string)
else
@channel.send(latest_run_results)
end
end
end
class CaptureFormatter < Formatters::BaseBisectFormatter
attr_accessor :results
alias_method :notify_results, :results=
end
end
end
end
end
| 1 | 17,750 | Maybe it makes sense to mention that those zombies are not around forever, but only until the parent process exits? Is that correct according to your observations, @benoittgt? Please disregard this note if zombies remain after that. | rspec-rspec-core | rb |
@@ -543,7 +543,18 @@ def apply_patches():
def start_apigateway(port=None, backend_port=None, asynchronous=None, update_listener=None):
port = port or config.PORT_APIGATEWAY
apply_patches()
- result = start_moto_server(
+
+ # Why here?
+ # 1. The moto mocking has to happen before any usage of botocore; the import registers
+ # a 'before-send-handler' event, which intercepts the calls to AWS.
+ # 2. Then we start the specific service mocking, which basically starts the service backend.
+ #
+ from moto import mock_apigateway
+
+ mock = mock_apigateway()
+ mock.start()
+
+ return start_moto_server(
key="apigateway",
name="API Gateway",
asynchronous=asynchronous, | 1 | import json
import logging
import re
from typing import Dict, Optional, Tuple
from urllib.parse import parse_qs, urlparse
from moto.apigateway import models as apigateway_models
from moto.apigateway.exceptions import NoIntegrationDefined, UsagePlanNotFoundException
from moto.apigateway.responses import APIGatewayResponse
from moto.core.utils import camelcase_to_underscores
from localstack import config
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.services.apigateway.helpers import (
TAG_KEY_CUSTOM_ID,
apply_json_patch_safe,
import_api_from_openapi_spec,
)
from localstack.services.infra import start_moto_server
from localstack.utils.common import DelSafeDict, short_uid, str_to_bool, to_str
LOG = logging.getLogger(__name__)
# additional REST API attributes
REST_API_ATTRIBUTES = [
"apiKeySource",
"disableExecuteApiEndpoint",
"minimumCompressionSize",
]
def apply_patches():
def apigateway_models_Stage_init(
self, cacheClusterEnabled=False, cacheClusterSize=None, **kwargs
):
apigateway_models_Stage_init_orig(
self,
cacheClusterEnabled=cacheClusterEnabled,
cacheClusterSize=cacheClusterSize,
**kwargs,
)
if (cacheClusterSize or cacheClusterEnabled) and not self.get("cacheClusterStatus"):
self["cacheClusterStatus"] = "AVAILABLE"
apigateway_models_Stage_init_orig = apigateway_models.Stage.__init__
apigateway_models.Stage.__init__ = apigateway_models_Stage_init
def apigateway_models_backend_delete_method(self, function_id, resource_id, method_type):
resource = self.get_resource(function_id, resource_id)
method = resource.get_method(method_type)
if not method:
return
return resource.resource_methods.pop(method_type)
def apigateway_models_resource_delete_integration(self, method_type):
if method_type in self.resource_methods:
return self.resource_methods[method_type].pop("methodIntegration", {})
return {}
def apigateway_models_Integration_init(
self,
integration_type,
uri,
http_method,
request_templates=None,
pass_through_behavior="WHEN_NO_MATCH",
cache_key_parameters=None,
*args,
**kwargs,
):
if cache_key_parameters is None:
cache_key_parameters = []
apigateway_models_Integration_init_orig(
self,
integration_type=integration_type,
uri=uri,
http_method=http_method,
request_templates=request_templates,
*args,
**kwargs,
)
self["passthroughBehavior"] = pass_through_behavior
self["cacheKeyParameters"] = cache_key_parameters
self["cacheNamespace"] = self.get("cacheNamespace") or short_uid()
# httpMethod not present in response if integration_type is None, verified against AWS
if integration_type == "MOCK":
self["httpMethod"] = None
if request_templates:
self["requestTemplates"] = request_templates
def apigateway_models_backend_put_rest_api(self, function_id, body, query_params):
rest_api = self.get_rest_api(function_id)
return import_api_from_openapi_spec(rest_api, function_id, body, query_params)
def _patch_api_gateway_entity(self, entity: Dict) -> Optional[Tuple[int, Dict, str]]:
not_supported_attributes = ["/id", "/region_name", "/create_date"]
patch_operations = self._get_param("patchOperations")
model_attributes = list(entity.keys())
for operation in patch_operations:
if operation["path"].strip("/") in REST_API_ATTRIBUTES:
operation["path"] = camelcase_to_underscores(operation["path"])
path_start = operation["path"].strip("/").split("/")[0]
path_start_usc = camelcase_to_underscores(path_start)
if path_start not in model_attributes and path_start_usc in model_attributes:
operation["path"] = operation["path"].replace(path_start, path_start_usc)
if operation["path"] in not_supported_attributes:
msg = "Invalid patch path %s" % (operation["path"])
return 400, {}, msg
apply_json_patch_safe(entity, patch_operations, in_place=True)
def apigateway_response_restapis_individual(self, request, full_url, headers):
if request.method in ["GET", "DELETE"]:
return apigateway_response_restapis_individual_orig(self, request, full_url, headers)
self.setup_class(request, full_url, headers)
function_id = self.path.replace("/restapis/", "", 1).split("/")[0]
if self.method == "PATCH":
rest_api = self.backend.apis.get(function_id)
if not rest_api:
msg = "Invalid API identifier specified %s:%s" % (
TEST_AWS_ACCOUNT_ID,
function_id,
)
return 404, {}, msg
if not isinstance(rest_api.__dict__, DelSafeDict):
rest_api.__dict__ = DelSafeDict(rest_api.__dict__)
result = _patch_api_gateway_entity(self, rest_api.__dict__)
if result is not None:
return result
# fix data types after patches have been applied
rest_api.minimum_compression_size = int(rest_api.minimum_compression_size or -1)
endpoint_configs = rest_api.endpoint_configuration or {}
if isinstance(endpoint_configs.get("vpcEndpointIds"), str):
endpoint_configs["vpcEndpointIds"] = [endpoint_configs["vpcEndpointIds"]]
return 200, {}, json.dumps(self.backend.get_rest_api(function_id).to_dict())
# handle import rest_api via swagger file
if self.method == "PUT":
body = json.loads(to_str(self.body))
rest_api = self.backend.put_rest_api(function_id, body, self.querystring)
return 200, {}, json.dumps(rest_api.to_dict())
return 400, {}, ""
def apigateway_response_resource_individual(self, request, full_url, headers):
if request.method in ["GET", "POST", "DELETE"]:
return apigateway_response_resource_individual_orig(self, request, full_url, headers)
self.setup_class(request, full_url, headers)
function_id = self.path.replace("/restapis/", "", 1).split("/")[0]
if self.method == "PATCH":
resource_id = self.path.split("/")[4]
resource = self.backend.get_resource(function_id, resource_id)
if not isinstance(resource.__dict__, DelSafeDict):
resource.__dict__ = DelSafeDict(resource.__dict__)
result = _patch_api_gateway_entity(self, resource.__dict__)
if result is not None:
return result
return 200, {}, json.dumps(resource.to_dict())
return 404, {}, ""
def apigateway_response_resource_methods(self, request, *args, **kwargs):
result = apigateway_response_resource_methods_orig(self, request, *args, **kwargs)
if self.method == "PUT" and self._get_param("requestParameters"):
request_parameters = self._get_param("requestParameters")
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
resource_id = url_path_parts[4]
method_type = url_path_parts[6]
resource = self.backend.get_resource(function_id, resource_id)
resource.resource_methods[method_type]["requestParameters"] = request_parameters
method = resource.resource_methods[method_type]
result = 200, {}, json.dumps(method)
if len(result) != 3:
return result
authorization_type = self._get_param("authorizationType")
if authorization_type in ["CUSTOM", "COGNITO_USER_POOLS"]:
data = json.loads(result[2])
if not data.get("authorizerId"):
payload = json.loads(to_str(request.data))
if "authorizerId" in payload:
data["authorizerId"] = payload["authorizerId"]
result = result[0], result[1], json.dumps(data)
return result
def apigateway_response_integrations(self, request, *args, **kwargs):
result = apigateway_response_integrations_orig(self, request, *args, **kwargs)
if self.method not in ["PUT", "PATCH"]:
return result
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
resource_id = url_path_parts[4]
method_type = url_path_parts[6]
integration = self.backend.get_integration(function_id, resource_id, method_type)
if not integration:
return result
if self.method == "PUT":
timeout_milliseconds = self._get_param("timeoutInMillis")
request_parameters = self._get_param("requestParameters") or {}
cache_key_parameters = self._get_param("cacheKeyParameters") or []
content_handling = self._get_param("contentHandling")
integration["timeoutInMillis"] = timeout_milliseconds
integration["requestParameters"] = request_parameters
integration["cacheKeyParameters"] = cache_key_parameters
integration["contentHandling"] = content_handling
return 200, {}, json.dumps(integration)
if self.method == "PATCH":
patch_operations = self._get_param("patchOperations")
apply_json_patch_safe(integration, patch_operations, in_place=True)
# fix data types
if integration.get("timeoutInMillis"):
integration["timeoutInMillis"] = int(integration.get("timeoutInMillis"))
skip_verification = (integration.get("tlsConfig") or {}).get("insecureSkipVerification")
if skip_verification:
integration["tlsConfig"]["insecureSkipVerification"] = str_to_bool(
skip_verification
)
return result
def apigateway_response_integration_responses(self, request, *args, **kwargs):
result = apigateway_response_integration_responses_orig(self, request, *args, **kwargs)
response_parameters = self._get_param("responseParameters")
if self.method == "PUT" and response_parameters:
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
resource_id = url_path_parts[4]
method_type = url_path_parts[6]
status_code = url_path_parts[9]
integration_response = self.backend.get_integration_response(
function_id, resource_id, method_type, status_code
)
integration_response["responseParameters"] = response_parameters
return 200, {}, json.dumps(integration_response)
return result
def apigateway_response_resource_method_responses(self, request, *args, **kwargs):
result = apigateway_response_resource_method_responses_orig(self, request, *args, **kwargs)
response_parameters = self._get_param("responseParameters")
if self.method == "PUT" and response_parameters:
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
resource_id = url_path_parts[4]
method_type = url_path_parts[6]
response_code = url_path_parts[8]
method_response = self.backend.get_method_response(
function_id, resource_id, method_type, response_code
)
method_response["responseParameters"] = response_parameters
return 200, {}, json.dumps(method_response)
return result
def apigateway_response_usage_plan_individual(
self, request, full_url, headers, *args, **kwargs
):
self.setup_class(request, full_url, headers)
if self.method == "PATCH":
url_path_parts = self.path.split("/")
usage_plan_id = url_path_parts[2]
patch_operations = self._get_param("patchOperations")
usage_plan = self.backend.usage_plans.get(usage_plan_id)
if not usage_plan:
raise UsagePlanNotFoundException()
apply_json_patch_safe(usage_plan, patch_operations, in_place=True)
# fix certain attributes after running the patch updates
if isinstance(usage_plan.get("apiStages"), (dict, str)):
usage_plan["apiStages"] = [usage_plan["apiStages"]]
api_stages = usage_plan.get("apiStages") or []
for i in range(len(api_stages)):
if isinstance(api_stages[i], str) and ":" in api_stages[i]:
api_id, stage = api_stages[i].split(":")
api_stages[i] = {"apiId": api_id, "stage": stage}
return 200, {}, json.dumps(usage_plan)
result = apigateway_response_usage_plan_individual_orig(
self, request, full_url, headers, *args, **kwargs
)
return result
def backend_update_deployment(self, function_id, deployment_id, patch_operations):
rest_api = self.get_rest_api(function_id)
deployment = rest_api.get_deployment(deployment_id)
deployment = deployment or {}
apply_json_patch_safe(deployment, patch_operations, in_place=True)
return deployment
# define json-patch operations for backend models
def backend_model_apply_operations(self, patch_operations):
# run pre-actions
if isinstance(self, apigateway_models.Stage):
if [op for op in patch_operations if "/accessLogSettings" in op.get("path", "")]:
self["accessLogSettings"] = self.get("accessLogSettings") or {}
# apply patches
apply_json_patch_safe(self, patch_operations, in_place=True)
# run post-actions
if isinstance(self, apigateway_models.Stage):
bool_params = ["cacheClusterEnabled", "tracingEnabled"]
for bool_param in bool_params:
if self.get(bool_param):
self[bool_param] = str_to_bool(self.get(bool_param))
return self
model_classes = [
apigateway_models.Authorizer,
apigateway_models.DomainName,
apigateway_models.Method,
apigateway_models.MethodResponse,
apigateway_models.Stage,
]
for model_class in model_classes:
model_class.apply_operations = (
model_class.apply_patch_operations
) = backend_model_apply_operations
# fix data types for some json-patch operation values
def method_apply_operations(self, patch_operations):
result = method_apply_operations_orig(self, patch_operations)
params = self.get("requestParameters") or {}
bool_params_prefixes = ["method.request.querystring", "method.request.header"]
list_params = ["authorizationScopes"]
for param, value in params.items():
for param_prefix in bool_params_prefixes:
if param.startswith(param_prefix):
params[param] = str_to_bool(value)
for list_param in list_params:
value = self.get(list_param)
if value and not isinstance(value, list):
self[list_param] = [value]
return result
method_apply_operations_orig = apigateway_models.Method.apply_operations
apigateway_models.Method.apply_operations = method_apply_operations
def method_response_apply_operations(self, patch_operations):
result = method_response_apply_operations_orig(self, patch_operations)
params = self.get("responseParameters") or {}
bool_params_prefixes = ["method.response.querystring", "method.response.header"]
for param, value in params.items():
for param_prefix in bool_params_prefixes:
if param.startswith(param_prefix) and not isinstance(value, bool):
params[param] = str(value) in ["true", "True"]
return result
method_response_apply_operations_orig = apigateway_models.MethodResponse.apply_operations
apigateway_models.MethodResponse.apply_operations = method_response_apply_operations
def stage_apply_operations(self, patch_operations):
result = stage_apply_operations_orig(self, patch_operations)
key_mappings = {
"metrics/enabled": ("metricsEnabled", bool),
"logging/loglevel": ("loggingLevel", str),
"logging/dataTrace": ("dataTraceEnabled", bool),
"throttling/burstLimit": ("throttlingBurstLimit", int),
"throttling/rateLimit": ("throttlingRateLimit", float),
"caching/enabled": ("cachingEnabled", bool),
"caching/ttlInSeconds": ("cacheTtlInSeconds", int),
"caching/dataEncrypted": ("cacheDataEncrypted", bool),
"caching/requireAuthorizationForCacheControl": (
"requireAuthorizationForCacheControl",
bool,
),
"caching/unauthorizedCacheControlHeaderStrategy": (
"unauthorizedCacheControlHeaderStrategy",
str,
),
}
def cast_value(value, value_type):
if value is None:
return value
if value_type == bool:
return str(value) in ["true", "True"]
return value_type(value)
method_settings = self["methodSettings"] = self.get("methodSettings") or {}
for operation in patch_operations:
path = operation["path"]
parts = path.strip("/").split("/")
if len(parts) >= 4:
if operation["op"] not in ["add", "replace"]:
continue
key1 = "/".join(parts[:-2])
setting_key = "%s/%s" % (parts[-2], parts[-1])
setting_name, setting_type = key_mappings.get(setting_key)
keys = [key1]
for key in keys:
setting = method_settings[key] = method_settings.get(key) or {}
value = operation.get("value")
value = cast_value(value, setting_type)
setting[setting_name] = value
if operation["op"] == "remove":
method_settings.pop(path, None)
method_settings.pop(path.lstrip("/"), None)
return result
stage_apply_operations_orig = apigateway_models.Stage.apply_operations
apigateway_models.Stage.apply_operations = stage_apply_operations
# patch integration error responses
def apigateway_models_resource_get_integration(self, method_type):
resource_method = self.resource_methods.get(method_type, {})
if "methodIntegration" not in resource_method:
raise NoIntegrationDefined()
return resource_method["methodIntegration"]
if not hasattr(apigateway_models.APIGatewayBackend, "put_rest_api"):
apigateway_response_restapis_individual_orig = APIGatewayResponse.restapis_individual
APIGatewayResponse.restapis_individual = apigateway_response_restapis_individual
apigateway_response_resource_individual_orig = APIGatewayResponse.resource_individual
APIGatewayResponse.resource_individual = apigateway_response_resource_individual
apigateway_models.APIGatewayBackend.put_rest_api = apigateway_models_backend_put_rest_api
if not hasattr(apigateway_models.APIGatewayBackend, "delete_method"):
apigateway_models.APIGatewayBackend.delete_method = apigateway_models_backend_delete_method
if not hasattr(apigateway_models.APIGatewayBackend, "update_deployment"):
apigateway_models.APIGatewayBackend.update_deployment = backend_update_deployment
apigateway_models_RestAPI_to_dict_orig = apigateway_models.RestAPI.to_dict
def apigateway_models_RestAPI_to_dict(self):
resp = apigateway_models_RestAPI_to_dict_orig(self)
resp["policy"] = None
if self.policy:
# Strip whitespaces for TF compatibility (not entirely sure why we need double-dumps,
# but otherwise: "error normalizing policy JSON: invalid character 'V' after top-level value")
resp["policy"] = json.dumps(json.dumps(json.loads(self.policy), separators=(",", ":")))[
1:-1
]
for attr in REST_API_ATTRIBUTES:
if attr not in resp:
resp[attr] = getattr(self, camelcase_to_underscores(attr), None)
resp["disableExecuteApiEndpoint"] = bool(
re.match(
r"true",
resp.get("disableExecuteApiEndpoint") or "",
flags=re.IGNORECASE,
)
)
return resp
apigateway_response_restapis_orig = APIGatewayResponse.restapis
# https://github.com/localstack/localstack/issues/171
def apigateway_response_restapis(self, request, full_url, headers):
parsed_qs = parse_qs(urlparse(full_url).query)
modes = parsed_qs.get("mode", [])
status, _, rest_api = apigateway_response_restapis_orig(self, request, full_url, headers)
if "import" not in modes:
return status, _, rest_api
function_id = json.loads(rest_api)["id"]
body = json.loads(request.data.decode("utf-8"))
self.backend.put_rest_api(function_id, body, parsed_qs)
return 200, {}, rest_api
def individual_deployment(self, request, full_url, headers, *args, **kwargs):
result = individual_deployment_orig(self, request, full_url, headers, *args, **kwargs)
if self.method == "PATCH" and len(result) >= 3 and result[2] in ["null", None, str(None)]:
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
deployment_id = url_path_parts[4]
patch_operations = self._get_param("patchOperations")
deployment = self.backend.update_deployment(
function_id, deployment_id, patch_operations
)
return 200, {}, json.dumps(deployment)
return result
# patch create_rest_api to allow using static API IDs defined via tags
def create_rest_api(self, *args, tags={}, **kwargs):
result = create_rest_api_orig(self, *args, tags=tags, **kwargs)
tags = tags or {}
custom_id = tags.get(TAG_KEY_CUSTOM_ID)
if custom_id:
self.apis.pop(result.id)
result.id = custom_id
self.apis[custom_id] = result
return result
create_rest_api_orig = apigateway_models.APIGatewayBackend.create_rest_api
apigateway_models.APIGatewayBackend.create_rest_api = create_rest_api
apigateway_models.Resource.get_integration = apigateway_models_resource_get_integration
apigateway_models.Resource.delete_integration = apigateway_models_resource_delete_integration
apigateway_response_resource_methods_orig = APIGatewayResponse.resource_methods
APIGatewayResponse.resource_methods = apigateway_response_resource_methods
individual_deployment_orig = APIGatewayResponse.individual_deployment
APIGatewayResponse.individual_deployment = individual_deployment
apigateway_response_integrations_orig = APIGatewayResponse.integrations
APIGatewayResponse.integrations = apigateway_response_integrations
apigateway_response_integration_responses_orig = APIGatewayResponse.integration_responses
APIGatewayResponse.integration_responses = apigateway_response_integration_responses
apigateway_response_resource_method_responses_orig = (
APIGatewayResponse.resource_method_responses
)
APIGatewayResponse.resource_method_responses = apigateway_response_resource_method_responses
apigateway_response_usage_plan_individual_orig = APIGatewayResponse.usage_plan_individual
APIGatewayResponse.usage_plan_individual = apigateway_response_usage_plan_individual
apigateway_models_Integration_init_orig = apigateway_models.Integration.__init__
apigateway_models.Integration.__init__ = apigateway_models_Integration_init
apigateway_models.RestAPI.to_dict = apigateway_models_RestAPI_to_dict
APIGatewayResponse.restapis = apigateway_response_restapis
def start_apigateway(port=None, backend_port=None, asynchronous=None, update_listener=None):
port = port or config.PORT_APIGATEWAY
apply_patches()
result = start_moto_server(
key="apigateway",
name="API Gateway",
asynchronous=asynchronous,
port=port,
backend_port=backend_port,
update_listener=update_listener,
)
return result
| 1 | 14,308 | moto mock setup needs to happen before boto usage. | localstack-localstack | py |
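To make the ordering constraint from the patch comment and the review note above concrete, here is a hedged, self-contained sketch rather than LocalStack code: the moto mock has to be started before any boto3 client issues a request, otherwise botocore's 'before-send' handler is not yet registered and the call would try to reach real AWS. The dummy credentials, the us-east-1 region, and the demo-api name are assumptions made for the example; mock_apigateway is the same moto entry point used in the patch.

import os

import boto3
from moto import mock_apigateway

# Fake credentials so botocore can sign the request that moto will then intercept.
os.environ.setdefault("AWS_ACCESS_KEY_ID", "testing")
os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "testing")

# Start the mock *before* any boto3/botocore usage so the 'before-send' handler
# is in place and the API Gateway call never leaves the process.
mock = mock_apigateway()
mock.start()
try:
    client = boto3.client("apigateway", region_name="us-east-1")
    api = client.create_rest_api(name="demo-api")  # served by moto's in-memory backend
    print(api["id"])
finally:
    mock.stop()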
@@ -199,6 +199,8 @@ def train(params, train_set, num_boost_round=100,
callbacks = set()
else:
for i, cb in enumerate(callbacks):
+ if hasattr(cb, 'first_metric_only') and cb.first_metric_only and feval is not None:
+ raise LightGBMError("`first_metric_only` and `feval` are not available at the same time.")
cb.__dict__.setdefault('order', i - len(callbacks))
callbacks = set(callbacks)
| 1 | # coding: utf-8
# pylint: disable = invalid-name, W0105
"""Library with training routines of LightGBM."""
from __future__ import absolute_import
import collections
import copy
import warnings
from operator import attrgetter
import numpy as np
from . import callback
from .basic import Booster, Dataset, LightGBMError, _InnerPredictor
from .compat import (SKLEARN_INSTALLED, _LGBMGroupKFold, _LGBMStratifiedKFold,
string_type, integer_types, range_, zip_)
def train(params, train_set, num_boost_round=100,
valid_sets=None, valid_names=None,
fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, evals_result=None,
verbose_eval=True, learning_rates=None,
keep_training_booster=False, callbacks=None):
"""Perform the training with given parameters.
Parameters
----------
params : dict
Parameters for training.
train_set : Dataset
Data to be trained on.
num_boost_round : int, optional (default=100)
Number of boosting iterations.
valid_sets : list of Datasets or None, optional (default=None)
List of data to be evaluated on during training.
valid_names : list of strings or None, optional (default=None)
Names of ``valid_sets``.
fobj : callable or None, optional (default=None)
Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) for each sample point.
For multi-class task, the preds are grouped by class_id first, then by row_id.
If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well.
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function.
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the preds are grouped by class_id first, then by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
To ignore the default metric corresponding to the used objective,
set the ``metric`` parameter to the string ``"None"`` in ``params``.
init_model : string, Booster or None, optional (default=None)
Filename of LightGBM model or Booster instance used for continue training.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
early_stopping_rounds : int or None, optional (default=None)
Activates early stopping. The model will train until the validation score stops improving.
Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
to continue training.
Requires at least one validation data and one metric.
If there's more than one, will check all of them. But the training data is ignored anyway.
To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``.
The index of iteration that has the best performance will be saved in the ``best_iteration`` field
if early stopping logic is enabled by setting ``early_stopping_rounds``.
evals_result: dict or None, optional (default=None)
This dictionary used to store all evaluation results of all the items in ``valid_sets``.
Example
-------
With a ``valid_sets`` = [valid_set, train_set],
``valid_names`` = ['eval', 'train']
and a ``params`` = {'metric': 'logloss'}
returns {'train': {'logloss': ['0.48253', '0.35953', ...]},
'eval': {'logloss': ['0.480385', '0.357756', ...]}}.
verbose_eval : bool or int, optional (default=True)
Requires at least one validation data.
If True, the eval metric on the valid set is printed at each boosting stage.
If int, the eval metric on the valid set is printed at every ``verbose_eval`` boosting stage.
The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.
Example
-------
With ``verbose_eval`` = 4 and at least one item in ``valid_sets``,
an evaluation metric is printed every 4 (instead of 1) boosting stages.
learning_rates : list, callable or None, optional (default=None)
List of learning rates for each boosting round
or a customized function that calculates ``learning_rate``
in terms of current number of round (e.g. yields learning rate decay).
keep_training_booster : bool, optional (default=False)
Whether the returned Booster will be used to keep training.
If False, the returned value will be converted into _InnerPredictor before returning.
You can still use _InnerPredictor as ``init_model`` for future continue training.
callbacks : list of callables or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
Returns
-------
booster : Booster
The trained Booster model.
"""
# create predictor first
params = copy.deepcopy(params)
if fobj is not None:
params['objective'] = 'none'
for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
"num_round", "num_rounds", "num_boost_round", "n_estimators"]:
if alias in params:
num_boost_round = params.pop(alias)
warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
break
for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
if alias in params:
early_stopping_rounds = params.pop(alias)
warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
break
first_metric_only = params.pop('first_metric_only', False)
if num_boost_round <= 0:
raise ValueError("num_boost_round should be greater than zero.")
if isinstance(init_model, string_type):
predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
elif isinstance(init_model, Booster):
predictor = init_model._to_predictor(dict(init_model.params, **params))
else:
predictor = None
init_iteration = predictor.num_total_iteration if predictor is not None else 0
# check dataset
if not isinstance(train_set, Dataset):
raise TypeError("Training only accepts Dataset object")
train_set._update_params(params) \
._set_predictor(predictor) \
.set_feature_name(feature_name) \
.set_categorical_feature(categorical_feature)
is_valid_contain_train = False
train_data_name = "training"
reduced_valid_sets = []
name_valid_sets = []
if valid_sets is not None:
if isinstance(valid_sets, Dataset):
valid_sets = [valid_sets]
if isinstance(valid_names, string_type):
valid_names = [valid_names]
for i, valid_data in enumerate(valid_sets):
# reduce cost for prediction training data
if valid_data is train_set:
is_valid_contain_train = True
if valid_names is not None:
train_data_name = valid_names[i]
continue
if not isinstance(valid_data, Dataset):
raise TypeError("Traninig only accepts Dataset object")
reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set))
if valid_names is not None and len(valid_names) > i:
name_valid_sets.append(valid_names[i])
else:
name_valid_sets.append('valid_' + str(i))
# process callbacks
if callbacks is None:
callbacks = set()
else:
for i, cb in enumerate(callbacks):
cb.__dict__.setdefault('order', i - len(callbacks))
callbacks = set(callbacks)
# Most of legacy advanced options becomes callbacks
if verbose_eval is True:
callbacks.add(callback.print_evaluation())
elif isinstance(verbose_eval, integer_types):
callbacks.add(callback.print_evaluation(verbose_eval))
if early_stopping_rounds is not None:
callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=bool(verbose_eval)))
if learning_rates is not None:
callbacks.add(callback.reset_parameter(learning_rate=learning_rates))
if evals_result is not None:
callbacks.add(callback.record_evaluation(evals_result))
callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
callbacks_after_iter = callbacks - callbacks_before_iter
callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))
# construct booster
try:
booster = Booster(params=params, train_set=train_set)
if is_valid_contain_train:
booster.set_train_data_name(train_data_name)
for valid_set, name_valid_set in zip_(reduced_valid_sets, name_valid_sets):
booster.add_valid(valid_set, name_valid_set)
finally:
train_set._reverse_update_params()
for valid_set in reduced_valid_sets:
valid_set._reverse_update_params()
booster.best_iteration = 0
# start training
for i in range_(init_iteration, init_iteration + num_boost_round):
for cb in callbacks_before_iter:
cb(callback.CallbackEnv(model=booster,
params=params,
iteration=i,
begin_iteration=init_iteration,
end_iteration=init_iteration + num_boost_round,
evaluation_result_list=None))
booster.update(fobj=fobj)
evaluation_result_list = []
# check evaluation result.
if valid_sets is not None:
if is_valid_contain_train:
evaluation_result_list.extend(booster.eval_train(feval))
evaluation_result_list.extend(booster.eval_valid(feval))
try:
for cb in callbacks_after_iter:
cb(callback.CallbackEnv(model=booster,
params=params,
iteration=i,
begin_iteration=init_iteration,
end_iteration=init_iteration + num_boost_round,
evaluation_result_list=evaluation_result_list))
except callback.EarlyStopException as earlyStopException:
booster.best_iteration = earlyStopException.best_iteration + 1
evaluation_result_list = earlyStopException.best_score
break
booster.best_score = collections.defaultdict(dict)
for dataset_name, eval_name, score, _ in evaluation_result_list:
booster.best_score[dataset_name][eval_name] = score
if not keep_training_booster:
booster.model_from_string(booster.model_to_string(), False).free_dataset()
return booster
class _CVBooster(object):
"""Auxiliary data struct to hold all boosters of CV."""
def __init__(self):
self.boosters = []
self.best_iteration = -1
def append(self, booster):
"""Add a booster to _CVBooster."""
self.boosters.append(booster)
def __getattr__(self, name):
"""Redirect methods call of _CVBooster."""
def handler_function(*args, **kwargs):
"""Call methods with each booster, and concatenate their results."""
ret = []
for booster in self.boosters:
ret.append(getattr(booster, name)(*args, **kwargs))
return ret
return handler_function
def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True,
shuffle=True, eval_train_metric=False):
"""Make a n-fold list of Booster from random indices."""
full_data = full_data.construct()
num_data = full_data.num_data()
if folds is not None:
if not hasattr(folds, '__iter__') and not hasattr(folds, 'split'):
raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx) tuples "
"or scikit-learn splitter object with split method")
if hasattr(folds, 'split'):
group_info = full_data.get_group()
if group_info is not None:
group_info = group_info.astype(int)
flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
else:
flatted_group = np.zeros(num_data, dtype=int)
folds = folds.split(X=np.zeros(num_data), y=full_data.get_label(), groups=flatted_group)
else:
if 'objective' in params and params['objective'] == 'lambdarank':
if not SKLEARN_INSTALLED:
raise LightGBMError('Scikit-learn is required for lambdarank cv.')
# lambdarank task, split according to groups
group_info = full_data.get_group().astype(int)
flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
group_kfold = _LGBMGroupKFold(n_splits=nfold)
folds = group_kfold.split(X=np.zeros(num_data), groups=flatted_group)
elif stratified:
if not SKLEARN_INSTALLED:
raise LightGBMError('Scikit-learn is required for stratified cv.')
skf = _LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed)
folds = skf.split(X=np.zeros(num_data), y=full_data.get_label())
else:
if shuffle:
randidx = np.random.RandomState(seed).permutation(num_data)
else:
randidx = np.arange(num_data)
kstep = int(num_data / nfold)
test_id = [randidx[i: i + kstep] for i in range_(0, num_data, kstep)]
train_id = [np.concatenate([test_id[i] for i in range_(nfold) if k != i]) for k in range_(nfold)]
folds = zip_(train_id, test_id)
ret = _CVBooster()
for train_idx, test_idx in folds:
train_set = full_data.subset(train_idx)
valid_set = full_data.subset(test_idx)
# run preprocessing on the data set if needed
if fpreproc is not None:
train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy())
else:
tparam = params
cvbooster = Booster(tparam, train_set)
if eval_train_metric:
cvbooster.add_valid(train_set, 'train')
cvbooster.add_valid(valid_set, 'valid')
ret.append(cvbooster)
return ret
def _agg_cv_result(raw_results, eval_train_metric=False):
"""Aggregate cross-validation results."""
cvmap = collections.defaultdict(list)
metric_type = {}
for one_result in raw_results:
for one_line in one_result:
if eval_train_metric:
key = "{} {}".format(one_line[0], one_line[1])
else:
key = one_line[1]
metric_type[key] = one_line[3]
cvmap[key].append(one_line[2])
return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()]
def cv(params, train_set, num_boost_round=100,
folds=None, nfold=5, stratified=True, shuffle=True,
metrics=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, fpreproc=None,
verbose_eval=None, show_stdv=True, seed=0,
callbacks=None, eval_train_metric=False):
"""Perform the cross-validation with given paramaters.
Parameters
----------
params : dict
Parameters for Booster.
train_set : Dataset
Data to be trained on.
num_boost_round : int, optional (default=100)
Number of boosting iterations.
folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None)
If generator or iterator, it should yield the train and test indices for each fold.
If object, it should be one of the scikit-learn splitter classes
(https://scikit-learn.org/stable/modules/classes.html#splitter-classes)
and have ``split`` method.
This argument has highest priority over other data split arguments.
nfold : int, optional (default=5)
Number of folds in CV.
stratified : bool, optional (default=True)
Whether to perform stratified sampling.
shuffle : bool, optional (default=True)
Whether to shuffle before splitting data.
metrics : string, list of strings or None, optional (default=None)
Evaluation metrics to be monitored while CV.
If not None, the metric in ``params`` will be overridden.
fobj : callable or None, optional (default=None)
Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) for each sample point.
For multi-class task, the preds are grouped by class_id first, then by row_id.
If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well.
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function.
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the preds are grouped by class_id first, then by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
To ignore the default metric corresponding to the used objective,
set ``metrics`` to the string ``"None"``.
init_model : string, Booster or None, optional (default=None)
Filename of LightGBM model or Booster instance used for continue training.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
early_stopping_rounds : int or None, optional (default=None)
Activates early stopping.
CV score needs to improve at least every ``early_stopping_rounds`` round(s)
to continue.
Requires at least one metric. If there's more than one, will check all of them.
To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``.
Last entry in evaluation history is the one from the best iteration.
fpreproc : callable or None, optional (default=None)
Preprocessing function that takes (dtrain, dtest, params)
and returns transformed versions of those.
verbose_eval : bool, int, or None, optional (default=None)
Whether to display the progress.
If None, progress will be displayed when np.ndarray is returned.
If True, progress will be displayed at every boosting stage.
If int, progress will be displayed at every given ``verbose_eval`` boosting stage.
show_stdv : bool, optional (default=True)
Whether to display the standard deviation in progress.
Results are not affected by this parameter, and always contain std.
seed : int, optional (default=0)
Seed used to generate the folds (passed to numpy.random.seed).
callbacks : list of callables or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
eval_train_metric : bool, optional (default=False)
Whether to display the train metric in progress.
The score of the metric is calculated again after each training step, so there is some impact on performance.
Returns
-------
eval_hist : dict
Evaluation history.
The dictionary has the following format:
{'metric1-mean': [values], 'metric1-stdv': [values],
'metric2-mean': [values], 'metric2-stdv': [values],
...}.
"""
if not isinstance(train_set, Dataset):
raise TypeError("Traninig only accepts Dataset object")
params = copy.deepcopy(params)
if fobj is not None:
params['objective'] = 'none'
for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
"num_round", "num_rounds", "num_boost_round", "n_estimators"]:
if alias in params:
warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
num_boost_round = params.pop(alias)
break
for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
if alias in params:
warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
early_stopping_rounds = params.pop(alias)
break
first_metric_only = params.pop('first_metric_only', False)
if num_boost_round <= 0:
raise ValueError("num_boost_round should be greater than zero.")
if isinstance(init_model, string_type):
predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
elif isinstance(init_model, Booster):
predictor = init_model._to_predictor(dict(init_model.params, **params))
else:
predictor = None
train_set._update_params(params) \
._set_predictor(predictor) \
.set_feature_name(feature_name) \
.set_categorical_feature(categorical_feature)
if metrics is not None:
params['metric'] = metrics
results = collections.defaultdict(list)
cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold,
params=params, seed=seed, fpreproc=fpreproc,
stratified=stratified, shuffle=shuffle,
eval_train_metric=eval_train_metric)
# setup callbacks
if callbacks is None:
callbacks = set()
else:
for i, cb in enumerate(callbacks):
cb.__dict__.setdefault('order', i - len(callbacks))
callbacks = set(callbacks)
if early_stopping_rounds is not None:
callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=False))
if verbose_eval is True:
callbacks.add(callback.print_evaluation(show_stdv=show_stdv))
elif isinstance(verbose_eval, integer_types):
callbacks.add(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))
callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
callbacks_after_iter = callbacks - callbacks_before_iter
callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))
for i in range_(num_boost_round):
for cb in callbacks_before_iter:
cb(callback.CallbackEnv(model=cvfolds,
params=params,
iteration=i,
begin_iteration=0,
end_iteration=num_boost_round,
evaluation_result_list=None))
cvfolds.update(fobj=fobj)
res = _agg_cv_result(cvfolds.eval_valid(feval), eval_train_metric)
for _, key, mean, _, std in res:
results[key + '-mean'].append(mean)
results[key + '-stdv'].append(std)
try:
for cb in callbacks_after_iter:
cb(callback.CallbackEnv(model=cvfolds,
params=params,
iteration=i,
begin_iteration=0,
end_iteration=num_boost_round,
evaluation_result_list=res))
except callback.EarlyStopException as earlyStopException:
cvfolds.best_iteration = earlyStopException.best_iteration + 1
for k in results:
results[k] = results[k][:cvfolds.best_iteration]
break
return dict(results)
| 1 | 20,419 | `hasattr(cb, 'first_metric_only') and cb.first_metric_only` -> `getattr(cb, 'first_metric_only', False)` | microsoft-LightGBM | cpp |
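As a concrete reading of the review suggestion above, the check added in the patch can collapse the hasattr test and the attribute lookup into a single getattr call with a default. The sketch below is standalone and illustrative only: the helper name _prepare_callbacks is invented for the example, and it raises a plain ValueError where the real code would raise LightGBMError from lightgbm.basic.

def _prepare_callbacks(callbacks, feval):
    """Order callbacks and reject first_metric_only callbacks when a custom feval is given."""
    if callbacks is None:
        return set()
    for i, cb in enumerate(callbacks):
        # getattr with a default folds `hasattr(cb, ...) and cb.first_metric_only` into one call
        if getattr(cb, 'first_metric_only', False) and feval is not None:
            raise ValueError("`first_metric_only` and `feval` are not available at the same time.")
        cb.__dict__.setdefault('order', i - len(callbacks))
    return set(callbacks)

Calling callbacks = _prepare_callbacks(callbacks, feval) would then replace the loop shown in the patch hunk.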
@@ -107,3 +107,17 @@ func getProtoRequest(ctx context.Context, transportRequest *transport.Request, n
}
return ctx, call, request, nil
}
+
+type streamHandler struct {
+ handle func(ServerStream) error
+}
+
+func newStreamHandler(
+ handle func(ServerStream) error,
+) *streamHandler {
+ return &streamHandler{handle}
+}
+
+func (s *streamHandler) HandleStream(stream transport.ServerStream) error {
+ return s.handle(stream)
+} | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package protobuf
import (
"context"
"github.com/gogo/protobuf/proto"
apiencoding "go.uber.org/yarpc/api/encoding"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/pkg/errors"
)
type unaryHandler struct {
handle func(context.Context, proto.Message) (proto.Message, error)
newRequest func() proto.Message
}
func newUnaryHandler(
handle func(context.Context, proto.Message) (proto.Message, error),
newRequest func() proto.Message,
) *unaryHandler {
return &unaryHandler{handle, newRequest}
}
func (u *unaryHandler) Handle(ctx context.Context, transportRequest *transport.Request, responseWriter transport.ResponseWriter) error {
ctx, call, request, err := getProtoRequest(ctx, transportRequest, u.newRequest)
if err != nil {
return err
}
response, appErr := u.handle(ctx, request)
if err := call.WriteToResponse(responseWriter); err != nil {
return err
}
var responseData []byte
var responseCleanup func()
if response != nil {
responseData, responseCleanup, err = marshal(transportRequest.Encoding, response)
if responseCleanup != nil {
defer responseCleanup()
}
if err != nil {
return errors.ResponseBodyEncodeError(transportRequest, err)
}
}
_, err = responseWriter.Write(responseData)
if err != nil {
return err
}
if appErr != nil {
responseWriter.SetApplicationError()
}
return appErr
}
type onewayHandler struct {
handleOneway func(context.Context, proto.Message) error
newRequest func() proto.Message
}
func newOnewayHandler(
handleOneway func(context.Context, proto.Message) error,
newRequest func() proto.Message,
) *onewayHandler {
return &onewayHandler{handleOneway, newRequest}
}
func (o *onewayHandler) HandleOneway(ctx context.Context, transportRequest *transport.Request) error {
ctx, _, request, err := getProtoRequest(ctx, transportRequest, o.newRequest)
if err != nil {
return err
}
return o.handleOneway(ctx, request)
}
func getProtoRequest(ctx context.Context, transportRequest *transport.Request, newRequest func() proto.Message) (context.Context, *apiencoding.InboundCall, proto.Message, error) {
if err := errors.ExpectEncodings(transportRequest, Encoding, JSONEncoding); err != nil {
return nil, nil, nil, err
}
ctx, call := apiencoding.NewInboundCall(ctx)
if err := call.ReadFromRequest(transportRequest); err != nil {
return nil, nil, nil, err
}
request := newRequest()
if err := unmarshal(transportRequest.Encoding, transportRequest.Body, request); err != nil {
return nil, nil, nil, errors.RequestBodyDecodeError(transportRequest, err)
}
return ctx, call, request, nil
}
| 1 | 15,181 | no newline for function | yarpc-yarpc-go | go |
@@ -240,7 +240,7 @@ static void config_head_handle_set_scale(struct wl_client *client,
return;
}
- double scale = wl_fixed_to_double(scale_fixed);
+ float scale = wl_fixed_to_double(scale_fixed);
if (scale <= 0) {
wl_resource_post_error(config_head_resource,
ZWLR_OUTPUT_CONFIGURATION_HEAD_V1_ERROR_INVALID_SCALE, | 1 | #include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <wlr/types/wlr_output_management_v1.h>
#include <wlr/util/log.h>
#include "util/signal.h"
#include "wlr-output-management-unstable-v1-protocol.h"
#define OUTPUT_MANAGER_VERSION 2
enum {
HEAD_STATE_ENABLED = 1 << 0,
HEAD_STATE_MODE = 1 << 1,
HEAD_STATE_POSITION = 1 << 2,
HEAD_STATE_TRANSFORM = 1 << 3,
HEAD_STATE_SCALE = 1 << 4,
};
static const uint32_t HEAD_STATE_ALL = HEAD_STATE_ENABLED | HEAD_STATE_MODE |
HEAD_STATE_POSITION | HEAD_STATE_TRANSFORM | HEAD_STATE_SCALE;
// Can return NULL if the head is inert
static struct wlr_output_head_v1 *head_from_resource(
struct wl_resource *resource) {
assert(wl_resource_instance_of(resource,
&zwlr_output_head_v1_interface, NULL));
return wl_resource_get_user_data(resource);
}
static struct wlr_output_mode *mode_from_resource(
struct wl_resource *resource) {
assert(wl_resource_instance_of(resource,
&zwlr_output_mode_v1_interface, NULL));
return wl_resource_get_user_data(resource);
}
static void head_destroy(struct wlr_output_head_v1 *head) {
if (head == NULL) {
return;
}
struct wl_resource *resource, *tmp;
wl_resource_for_each_safe(resource, tmp, &head->mode_resources) {
zwlr_output_mode_v1_send_finished(resource);
wl_resource_destroy(resource);
}
wl_resource_for_each_safe(resource, tmp, &head->resources) {
zwlr_output_head_v1_send_finished(resource);
wl_resource_destroy(resource);
}
wl_list_remove(&head->link);
wl_list_remove(&head->output_destroy.link);
free(head);
}
static void head_handle_output_destroy(struct wl_listener *listener,
void *data) {
struct wlr_output_head_v1 *head =
wl_container_of(listener, head, output_destroy);
head->manager->current_configuration_dirty = true;
head_destroy(head);
}
static struct wlr_output_head_v1 *head_create(
struct wlr_output_manager_v1 *manager, struct wlr_output *output) {
struct wlr_output_head_v1 *head = calloc(1, sizeof(*head));
if (head == NULL) {
return NULL;
}
head->manager = manager;
head->state.output = output;
wl_list_init(&head->resources);
wl_list_init(&head->mode_resources);
wl_list_insert(&manager->heads, &head->link);
head->output_destroy.notify = head_handle_output_destroy;
wl_signal_add(&output->events.destroy, &head->output_destroy);
return head;
}
static void config_head_destroy(
struct wlr_output_configuration_head_v1 *config_head) {
if (config_head == NULL) {
return;
}
if (config_head->resource != NULL) {
wl_resource_set_user_data(config_head->resource, NULL); // make inert
}
wl_list_remove(&config_head->link);
wl_list_remove(&config_head->output_destroy.link);
free(config_head);
}
static void config_head_handle_output_destroy(struct wl_listener *listener,
void *data) {
struct wlr_output_configuration_head_v1 *config_head =
wl_container_of(listener, config_head, output_destroy);
config_head_destroy(config_head);
}
static struct wlr_output_configuration_head_v1 *config_head_create(
struct wlr_output_configuration_v1 *config, struct wlr_output *output) {
struct wlr_output_configuration_head_v1 *config_head =
calloc(1, sizeof(*config_head));
if (config_head == NULL) {
return NULL;
}
config_head->config = config;
config_head->state.output = output;
wl_list_insert(&config->heads, &config_head->link);
config_head->output_destroy.notify = config_head_handle_output_destroy;
wl_signal_add(&output->events.destroy, &config_head->output_destroy);
return config_head;
}
struct wlr_output_configuration_head_v1 *
wlr_output_configuration_head_v1_create(
struct wlr_output_configuration_v1 *config, struct wlr_output *output) {
struct wlr_output_configuration_head_v1 *config_head =
config_head_create(config, output);
if (config_head == NULL) {
return NULL;
}
config_head->state.enabled = output->enabled;
config_head->state.mode = output->current_mode;
config_head->state.custom_mode.width = output->width;
config_head->state.custom_mode.height = output->height;
config_head->state.custom_mode.refresh = output->refresh;
config_head->state.transform = output->transform;
config_head->state.scale = output->scale;
return config_head;
}
static const struct zwlr_output_configuration_head_v1_interface config_head_impl;
// Can return NULL if the configuration head is inert
static struct wlr_output_configuration_head_v1 *config_head_from_resource(
struct wl_resource *resource) {
assert(wl_resource_instance_of(resource,
&zwlr_output_configuration_head_v1_interface, &config_head_impl));
return wl_resource_get_user_data(resource);
}
static void config_head_handle_set_mode(struct wl_client *client,
struct wl_resource *config_head_resource,
struct wl_resource *mode_resource) {
struct wlr_output_configuration_head_v1 *config_head =
config_head_from_resource(config_head_resource);
if (config_head == NULL) {
return;
}
// Mode can be NULL if the output doesn't support modes (in which case we
// expose only one "virtual" mode, the current mode)
struct wlr_output_mode *mode = mode_from_resource(mode_resource);
struct wlr_output *output = config_head->state.output;
bool found = (mode == NULL && wl_list_empty(&output->modes));
struct wlr_output_mode *m;
wl_list_for_each(m, &output->modes, link) {
if (mode == m) {
found = true;
break;
}
}
if (!found) {
wl_resource_post_error(config_head_resource,
ZWLR_OUTPUT_CONFIGURATION_HEAD_V1_ERROR_INVALID_MODE,
"mode doesn't belong to head");
return;
}
config_head->state.mode = mode;
if (mode != NULL) {
config_head->state.custom_mode.width = 0;
config_head->state.custom_mode.height = 0;
config_head->state.custom_mode.refresh = 0;
}
}
static void config_head_handle_set_custom_mode(struct wl_client *client,
struct wl_resource *config_head_resource, int32_t width, int32_t height,
int32_t refresh) {
struct wlr_output_configuration_head_v1 *config_head =
config_head_from_resource(config_head_resource);
if (config_head == NULL) {
return;
}
if (width <= 0 || height <= 0 || refresh < 0) {
wl_resource_post_error(config_head_resource,
ZWLR_OUTPUT_CONFIGURATION_HEAD_V1_ERROR_INVALID_CUSTOM_MODE,
"invalid custom mode");
return;
}
config_head->state.mode = NULL;
config_head->state.custom_mode.width = width;
config_head->state.custom_mode.height = height;
config_head->state.custom_mode.refresh = refresh;
}
static void config_head_handle_set_position(struct wl_client *client,
struct wl_resource *config_head_resource, int32_t x, int32_t y) {
struct wlr_output_configuration_head_v1 *config_head =
config_head_from_resource(config_head_resource);
if (config_head == NULL) {
return;
}
config_head->state.x = x;
config_head->state.y = y;
}
static void config_head_handle_set_transform(struct wl_client *client,
struct wl_resource *config_head_resource, int32_t transform) {
struct wlr_output_configuration_head_v1 *config_head =
config_head_from_resource(config_head_resource);
if (config_head == NULL) {
return;
}
if (transform < WL_OUTPUT_TRANSFORM_NORMAL ||
transform > WL_OUTPUT_TRANSFORM_FLIPPED_270) {
wl_resource_post_error(config_head_resource,
ZWLR_OUTPUT_CONFIGURATION_HEAD_V1_ERROR_INVALID_TRANSFORM,
"invalid transform");
return;
}
config_head->state.transform = transform;
}
static void config_head_handle_set_scale(struct wl_client *client,
struct wl_resource *config_head_resource, wl_fixed_t scale_fixed) {
struct wlr_output_configuration_head_v1 *config_head =
config_head_from_resource(config_head_resource);
if (config_head == NULL) {
return;
}
double scale = wl_fixed_to_double(scale_fixed);
if (scale <= 0) {
wl_resource_post_error(config_head_resource,
ZWLR_OUTPUT_CONFIGURATION_HEAD_V1_ERROR_INVALID_SCALE,
"invalid scale");
return;
}
config_head->state.scale = scale;
}
static const struct zwlr_output_configuration_head_v1_interface config_head_impl = {
.set_mode = config_head_handle_set_mode,
.set_custom_mode = config_head_handle_set_custom_mode,
.set_position = config_head_handle_set_position,
.set_transform = config_head_handle_set_transform,
.set_scale = config_head_handle_set_scale,
};
static void config_head_handle_resource_destroy(struct wl_resource *resource) {
struct wlr_output_configuration_head_v1 *config_head =
config_head_from_resource(resource);
config_head_destroy(config_head);
}
static const struct zwlr_output_configuration_v1_interface config_impl;
// Can return NULL if the config has been used
static struct wlr_output_configuration_v1 *config_from_resource(
struct wl_resource *resource) {
assert(wl_resource_instance_of(resource,
&zwlr_output_configuration_v1_interface, &config_impl));
return wl_resource_get_user_data(resource);
}
// Checks that the head is unconfigured (ie. no enable_head/disable_head request
// has yet been sent for this head), if not sends a protocol error.
static bool config_check_head_is_unconfigured(
struct wlr_output_configuration_v1 *config, struct wlr_output *output) {
struct wlr_output_configuration_head_v1 *head;
wl_list_for_each(head, &config->heads, link) {
if (head->state.output == output) {
wl_resource_post_error(config->resource,
ZWLR_OUTPUT_CONFIGURATION_V1_ERROR_ALREADY_CONFIGURED_HEAD,
"head has already been configured");
return false;
}
}
return true;
}
static void config_handle_enable_head(struct wl_client *client,
struct wl_resource *config_resource, uint32_t id,
struct wl_resource *head_resource) {
struct wlr_output_configuration_v1 *config =
config_from_resource(config_resource);
if (config == NULL || config->finalized) {
wl_resource_post_error(config_resource,
ZWLR_OUTPUT_CONFIGURATION_V1_ERROR_ALREADY_USED,
"configuration object has already been used");
return;
}
struct wlr_output_head_v1 *head = head_from_resource(head_resource);
// Create an inert resource if the head no longer exists
struct wlr_output_configuration_head_v1 *config_head = NULL;
if (head != NULL) {
if (!config_check_head_is_unconfigured(config, head->state.output)) {
return;
}
config_head = config_head_create(config, head->state.output);
if (config_head == NULL) {
wl_resource_post_no_memory(config_resource);
return;
}
config_head->state = head->state;
}
uint32_t version = wl_resource_get_version(config_resource);
struct wl_resource *resource = wl_resource_create(client,
&zwlr_output_configuration_head_v1_interface, version, id);
if (resource == NULL) {
wl_client_post_no_memory(client);
return;
}
wl_resource_set_implementation(resource, &config_head_impl,
config_head, config_head_handle_resource_destroy);
if (config_head != NULL) {
config_head->resource = resource;
config_head->state.enabled = true;
}
}
static void config_handle_disable_head(struct wl_client *client,
struct wl_resource *config_resource,
struct wl_resource *head_resource) {
struct wlr_output_configuration_v1 *config =
config_from_resource(config_resource);
if (config == NULL || config->finalized) {
wl_resource_post_error(config_resource,
ZWLR_OUTPUT_CONFIGURATION_V1_ERROR_ALREADY_USED,
"configuration object has already been used");
return;
}
struct wlr_output_head_v1 *head = head_from_resource(head_resource);
if (head == NULL) {
return;
}
if (!config_check_head_is_unconfigured(config, head->state.output)) {
return;
}
struct wlr_output_configuration_head_v1 *config_head =
config_head_create(config, head->state.output);
if (config_head == NULL) {
wl_resource_post_no_memory(config_resource);
return;
}
config_head->state.enabled = false;
}
// Finalizes a configuration. This prevents the same config from being used
// multiple times.
static void config_finalize(struct wlr_output_configuration_v1 *config) {
if (config->finalized) {
return;
}
	// Destroy config head resources now; the client is forbidden to use them at
	// this point anyway
struct wlr_output_configuration_head_v1 *config_head, *tmp;
wl_list_for_each_safe(config_head, tmp, &config->heads, link) {
// Resource is NULL if head has been disabled
if (config_head->resource != NULL) {
wl_resource_set_user_data(config_head->resource, NULL);
wl_resource_destroy(config_head->resource);
config_head->resource = NULL;
}
}
config->finalized = true;
}
// Destroys the config if serial is invalid
static bool config_validate_serial(struct wlr_output_configuration_v1 *config) {
if (config->serial != config->manager->serial) {
wlr_log(WLR_DEBUG, "Ignored configuration request: invalid serial");
zwlr_output_configuration_v1_send_cancelled(config->resource);
wlr_output_configuration_v1_destroy(config);
return false;
}
return true;
}
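// Handler for the apply request: finalizes the config, checks its serial and
// emits the manager's apply event so the compositor can act on it.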
static void config_handle_apply(struct wl_client *client,
struct wl_resource *config_resource) {
struct wlr_output_configuration_v1 *config =
config_from_resource(config_resource);
if (config == NULL || config->finalized) {
wl_resource_post_error(config_resource,
ZWLR_OUTPUT_CONFIGURATION_V1_ERROR_ALREADY_USED,
"configuration object has already been used");
return;
}
config_finalize(config);
if (!config_validate_serial(config)) {
return;
}
wlr_signal_emit_safe(&config->manager->events.apply, config);
}
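// Handler for the test request: same as apply, but emits the test event so the
// compositor can check the configuration without applying it.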
static void config_handle_test(struct wl_client *client,
struct wl_resource *config_resource) {
struct wlr_output_configuration_v1 *config =
config_from_resource(config_resource);
if (config == NULL || config->finalized) {
wl_resource_post_error(config_resource,
ZWLR_OUTPUT_CONFIGURATION_V1_ERROR_ALREADY_USED,
"configuration object has already been used");
return;
}
config_finalize(config);
if (!config_validate_serial(config)) {
return;
}
wlr_signal_emit_safe(&config->manager->events.test, config);
}
static void config_handle_destroy(struct wl_client *client,
struct wl_resource *config_resource) {
wl_resource_destroy(config_resource);
}
static const struct zwlr_output_configuration_v1_interface config_impl = {
.enable_head = config_handle_enable_head,
.disable_head = config_handle_disable_head,
.apply = config_handle_apply,
.test = config_handle_test,
.destroy = config_handle_destroy,
};
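// Allocates a configuration object with an empty head list. finalized is true
// for compositor-created configs and false for client-created ones.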
static struct wlr_output_configuration_v1 *config_create(bool finalized) {
struct wlr_output_configuration_v1 *config = calloc(1, sizeof(*config));
if (config == NULL) {
return NULL;
}
wl_list_init(&config->heads);
config->finalized = finalized;
return config;
}
struct wlr_output_configuration_v1 *wlr_output_configuration_v1_create(void) {
return config_create(true);
}
void wlr_output_configuration_v1_destroy(
struct wlr_output_configuration_v1 *config) {
if (config == NULL) {
return;
}
config_finalize(config);
if (config->resource != NULL) {
wl_resource_set_user_data(config->resource, NULL); // make inert
}
struct wlr_output_configuration_head_v1 *config_head, *tmp;
wl_list_for_each_safe(config_head, tmp, &config->heads, link) {
config_head_destroy(config_head);
}
free(config);
}
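// Called when the config resource is destroyed. A finalized config is owned by
// the compositor, so only detach the resource; otherwise destroy the config.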
static void config_handle_resource_destroy(struct wl_resource *resource) {
struct wlr_output_configuration_v1 *config = config_from_resource(resource);
if (config == NULL) {
return;
}
if (config->finalized) {
config->resource = NULL; // we no longer own the config
} else {
wlr_output_configuration_v1_destroy(config);
}
}
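// Notifies the client that its configuration was applied. Does nothing if the
// client already destroyed the configuration resource.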
void wlr_output_configuration_v1_send_succeeded(
struct wlr_output_configuration_v1 *config) {
assert(!config->finished);
if (config->resource == NULL) {
return; // client destroyed the resource early
}
zwlr_output_configuration_v1_send_succeeded(config->resource);
config->finished = true;
}
void wlr_output_configuration_v1_send_failed(
struct wlr_output_configuration_v1 *config) {
assert(!config->finished);
if (config->resource == NULL) {
return; // client destroyed the resource early
}
zwlr_output_configuration_v1_send_failed(config->resource);
config->finished = true;
}
static const struct zwlr_output_manager_v1_interface manager_impl;
static struct wlr_output_manager_v1 *manager_from_resource(
struct wl_resource *resource) {
assert(wl_resource_instance_of(resource,
&zwlr_output_manager_v1_interface, &manager_impl));
return wl_resource_get_user_data(resource);
}
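// Handler for create_configuration: allocates an unfinalized config bound to
// this manager and to the client-supplied serial.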
static void manager_handle_create_configuration(struct wl_client *client,
struct wl_resource *manager_resource, uint32_t id, uint32_t serial) {
struct wlr_output_manager_v1 *manager =
manager_from_resource(manager_resource);
struct wlr_output_configuration_v1 *config = config_create(false);
if (config == NULL) {
wl_resource_post_no_memory(manager_resource);
return;
}
config->manager = manager;
config->serial = serial;
uint32_t version = wl_resource_get_version(manager_resource);
config->resource = wl_resource_create(client,
&zwlr_output_configuration_v1_interface, version, id);
if (config->resource == NULL) {
wl_client_post_no_memory(client);
return;
}
wl_resource_set_implementation(config->resource, &config_impl,
config, config_handle_resource_destroy);
}
static void manager_handle_stop(struct wl_client *client,
struct wl_resource *manager_resource) {
zwlr_output_manager_v1_send_finished(manager_resource);
wl_resource_destroy(manager_resource);
}
static const struct zwlr_output_manager_v1_interface manager_impl = {
.create_configuration = manager_handle_create_configuration,
.stop = manager_handle_stop,
};
static void manager_handle_resource_destroy(struct wl_resource *resource) {
wl_list_remove(wl_resource_get_link(resource));
}
static void manager_send_head(struct wlr_output_manager_v1 *manager,
struct wlr_output_head_v1 *head, struct wl_resource *manager_resource);
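// Global bind handler: creates a manager resource for the client, advertises
// every existing head to it and sends a done event with the current serial.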
static void manager_bind(struct wl_client *client, void *data, uint32_t version,
uint32_t id) {
struct wlr_output_manager_v1 *manager = data;
struct wl_resource *resource = wl_resource_create(client,
&zwlr_output_manager_v1_interface, version, id);
if (resource == NULL) {
wl_client_post_no_memory(client);
return;
}
wl_resource_set_implementation(resource, &manager_impl, manager,
manager_handle_resource_destroy);
wl_list_insert(&manager->resources, wl_resource_get_link(resource));
struct wlr_output_head_v1 *head;
wl_list_for_each(head, &manager->heads, link) {
manager_send_head(manager, head, resource);
}
zwlr_output_manager_v1_send_done(resource, manager->serial);
}
static void manager_handle_display_destroy(struct wl_listener *listener,
void *data) {
struct wlr_output_manager_v1 *manager =
wl_container_of(listener, manager, display_destroy);
wlr_signal_emit_safe(&manager->events.destroy, manager);
wl_list_remove(&manager->display_destroy.link);
struct wlr_output_head_v1 *head, *tmp;
wl_list_for_each_safe(head, tmp, &manager->heads, link) {
head_destroy(head);
}
wl_global_destroy(manager->global);
free(manager);
}
struct wlr_output_manager_v1 *wlr_output_manager_v1_create(
struct wl_display *display) {
struct wlr_output_manager_v1 *manager = calloc(1, sizeof(*manager));
if (manager == NULL) {
return NULL;
}
manager->display = display;
wl_list_init(&manager->resources);
wl_list_init(&manager->heads);
wl_signal_init(&manager->events.destroy);
wl_signal_init(&manager->events.apply);
wl_signal_init(&manager->events.test);
manager->global = wl_global_create(display,
&zwlr_output_manager_v1_interface, OUTPUT_MANAGER_VERSION,
manager, manager_bind);
if (manager->global == NULL) {
free(manager);
return NULL;
}
manager->display_destroy.notify = manager_handle_display_destroy;
wl_display_add_destroy_listener(display, &manager->display_destroy);
return manager;
}
static struct wlr_output_configuration_head_v1 *configuration_get_head(
struct wlr_output_configuration_v1 *config, struct wlr_output *output) {
struct wlr_output_configuration_head_v1 *head;
wl_list_for_each(head, &config->heads, link) {
if (head->state.output == output) {
return head;
}
}
return NULL;
}
static void send_mode_state(struct wl_resource *mode_resource,
struct wlr_output_mode *mode) {
zwlr_output_mode_v1_send_size(mode_resource, mode->width, mode->height);
if (mode->refresh > 0) {
zwlr_output_mode_v1_send_refresh(mode_resource, mode->refresh);
}
if (mode->preferred) {
zwlr_output_mode_v1_send_preferred(mode_resource);
}
}
static void mode_handle_resource_destroy(struct wl_resource *resource) {
wl_list_remove(wl_resource_get_link(resource));
}
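// Creates a zwlr_output_mode_v1 resource on the given head resource and, for a
// real mode, sends its size/refresh/preferred state. A NULL mode creates a
// placeholder resource used when the output doesn't support modes.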
static struct wl_resource *head_send_mode(struct wlr_output_head_v1 *head,
struct wl_resource *head_resource, struct wlr_output_mode *mode) {
struct wl_client *client = wl_resource_get_client(head_resource);
uint32_t version = wl_resource_get_version(head_resource);
struct wl_resource *mode_resource =
wl_resource_create(client, &zwlr_output_mode_v1_interface, version, 0);
if (mode_resource == NULL) {
wl_resource_post_no_memory(head_resource);
return NULL;
}
wl_resource_set_implementation(mode_resource, NULL, mode,
mode_handle_resource_destroy);
wl_list_insert(&head->mode_resources, wl_resource_get_link(mode_resource));
zwlr_output_head_v1_send_mode(head_resource, mode_resource);
if (mode != NULL) {
send_mode_state(mode_resource, mode);
}
return mode_resource;
}
// Sends new head state to a client.
static void head_send_state(struct wlr_output_head_v1 *head,
struct wl_resource *head_resource, uint32_t state) {
struct wl_client *client = wl_resource_get_client(head_resource);
if (state & HEAD_STATE_ENABLED) {
zwlr_output_head_v1_send_enabled(head_resource, head->state.enabled);
// On enabling we send all current data since clients have not been
// notified about potential data changes while the head was disabled.
state = HEAD_STATE_ALL;
}
if (!head->state.enabled) {
return;
}
if (state & HEAD_STATE_MODE) {
assert(head->state.mode != NULL ||
wl_list_empty(&head->state.output->modes));
bool found = false;
struct wl_resource *mode_resource;
wl_resource_for_each(mode_resource, &head->mode_resources) {
if (wl_resource_get_client(mode_resource) == client &&
mode_from_resource(mode_resource) == head->state.mode) {
found = true;
break;
}
}
assert(found);
if (head->state.mode == NULL) {
// Fake a single output mode if output doesn't support modes
struct wlr_output_mode virtual_mode = {
.width = head->state.custom_mode.width,
.height = head->state.custom_mode.height,
.refresh = head->state.custom_mode.refresh,
};
send_mode_state(mode_resource, &virtual_mode);
}
zwlr_output_head_v1_send_current_mode(head_resource, mode_resource);
}
if (state & HEAD_STATE_POSITION) {
zwlr_output_head_v1_send_position(head_resource,
head->state.x, head->state.y);
}
if (state & HEAD_STATE_TRANSFORM) {
zwlr_output_head_v1_send_transform(head_resource,
head->state.transform);
}
if (state & HEAD_STATE_SCALE) {
zwlr_output_head_v1_send_scale(head_resource,
wl_fixed_from_double(head->state.scale));
}
}
static void head_handle_resource_destroy(struct wl_resource *resource) {
wl_list_remove(wl_resource_get_link(resource));
}
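// Advertises a head to a single manager resource: creates the head resource,
// sends its static properties (name, description, physical size, and
// make/model/serial where the resource version allows it), its modes and its
// current state.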
static void manager_send_head(struct wlr_output_manager_v1 *manager,
struct wlr_output_head_v1 *head, struct wl_resource *manager_resource) {
struct wlr_output *output = head->state.output;
struct wl_client *client = wl_resource_get_client(manager_resource);
uint32_t version = wl_resource_get_version(manager_resource);
struct wl_resource *head_resource = wl_resource_create(client,
&zwlr_output_head_v1_interface, version, 0);
if (head_resource == NULL) {
wl_resource_post_no_memory(manager_resource);
return;
}
wl_resource_set_implementation(head_resource, NULL, head,
head_handle_resource_destroy);
wl_list_insert(&head->resources, wl_resource_get_link(head_resource));
zwlr_output_manager_v1_send_head(manager_resource, head_resource);
zwlr_output_head_v1_send_name(head_resource, output->name);
zwlr_output_head_v1_send_description(head_resource,
output->description ? output->description : "Unknown");
if (output->phys_width > 0 && output->phys_height > 0) {
zwlr_output_head_v1_send_physical_size(head_resource,
output->phys_width, output->phys_height);
}
if (version >= ZWLR_OUTPUT_HEAD_V1_MAKE_SINCE_VERSION && output->make[0] != '\0') {
zwlr_output_head_v1_send_make(head_resource, output->make);
}
if (version >= ZWLR_OUTPUT_HEAD_V1_MODEL_SINCE_VERSION && output->model[0] != '\0') {
zwlr_output_head_v1_send_model(head_resource, output->model);
}
if (version >= ZWLR_OUTPUT_HEAD_V1_SERIAL_NUMBER_SINCE_VERSION && output->serial[0] != '\0') {
zwlr_output_head_v1_send_serial_number(head_resource, output->serial);
}
struct wlr_output_mode *mode;
wl_list_for_each(mode, &output->modes, link) {
head_send_mode(head, head_resource, mode);
}
if (wl_list_empty(&output->modes)) {
// Output doesn't support modes. Send a virtual one.
head_send_mode(head, head_resource, NULL);
}
head_send_state(head, head_resource, HEAD_STATE_ALL);
}
// Computes the state that has changed and sends it to all clients, then writes
// the new state to the head.
static bool manager_update_head(struct wlr_output_manager_v1 *manager,
struct wlr_output_head_v1 *head,
struct wlr_output_head_v1_state *next) {
struct wlr_output_head_v1_state *current = &head->state;
uint32_t state = 0;
if (current->enabled != next->enabled) {
state |= HEAD_STATE_ENABLED;
}
if (current->mode != next->mode) {
state |= HEAD_STATE_MODE;
}
if (current->custom_mode.width != next->custom_mode.width ||
current->custom_mode.height != next->custom_mode.height ||
current->custom_mode.refresh != next->custom_mode.refresh) {
state |= HEAD_STATE_MODE;
}
if (current->x != next->x || current->y != next->y) {
state |= HEAD_STATE_POSITION;
}
if (current->transform != next->transform) {
state |= HEAD_STATE_TRANSFORM;
}
if (current->scale != next->scale) {
state |= HEAD_STATE_SCALE;
}
// If a mode was added to wlr_output.modes we need to add the new mode
// to the wlr_output_head
struct wlr_output_mode *mode;
wl_list_for_each(mode, &head->state.output->modes, link) {
bool found = false;
struct wl_resource *mode_resource;
wl_resource_for_each(mode_resource, &head->mode_resources) {
if (mode_from_resource(mode_resource) == mode) {
found = true;
break;
}
}
if (!found) {
struct wl_resource *resource;
wl_resource_for_each(resource, &head->resources) {
head_send_mode(head, resource, mode);
}
}
}
if (state != 0) {
*current = *next;
struct wl_resource *resource;
wl_resource_for_each(resource, &head->resources) {
head_send_state(head, resource, state);
}
}
return state != 0;
}
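// Replaces the manager's current state with the given configuration: existing
// heads are updated or destroyed, new heads are created and advertised, and if
// anything changed the serial is bumped and a done event is sent to all
// clients. The provided configuration is destroyed before returning.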
void wlr_output_manager_v1_set_configuration(
struct wlr_output_manager_v1 *manager,
struct wlr_output_configuration_v1 *config) {
bool changed = manager->current_configuration_dirty;
// Either update or destroy existing heads
struct wlr_output_head_v1 *existing_head, *head_tmp;
wl_list_for_each_safe(existing_head, head_tmp, &manager->heads, link) {
struct wlr_output_configuration_head_v1 *updated_head =
configuration_get_head(config, existing_head->state.output);
if (updated_head != NULL) {
changed |= manager_update_head(manager,
existing_head, &updated_head->state);
config_head_destroy(updated_head);
} else {
head_destroy(existing_head);
changed = true;
}
}
// Heads remaining in `config` are new heads
// Move new heads to current config
struct wlr_output_configuration_head_v1 *config_head, *config_head_tmp;
wl_list_for_each_safe(config_head, config_head_tmp, &config->heads, link) {
struct wlr_output_head_v1 *head =
head_create(manager, config_head->state.output);
if (head == NULL) {
wlr_log_errno(WLR_ERROR, "Allocation failed");
continue;
}
head->state = config_head->state;
struct wl_resource *manager_resource;
wl_resource_for_each(manager_resource, &manager->resources) {
manager_send_head(manager, head, manager_resource);
}
changed = true;
}
wlr_output_configuration_v1_destroy(config);
if (!changed) {
return;
}
manager->serial = wl_display_next_serial(manager->display);
struct wl_resource *manager_resource;
wl_resource_for_each(manager_resource, &manager->resources) {
zwlr_output_manager_v1_send_done(manager_resource,
manager->serial);
}
manager->current_configuration_dirty = false;
}
| 1 | 16,367 | This makes me feel icky. We could also go the route of making `wlr_output.scale` a double instead of a float, but that would be a more widely reaching change. | swaywm-wlroots | c |
@@ -219,7 +219,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
while (true) {
if (!vp->atp &&
!(node = (vp->nl) ? vp->nl->GetItem(g, vp->k++, tdp->Usedom ? node : NULL)
- : NULL))
+ : NULL)) {
if (j) {
vp = lvlp[--j];
| 1 | /************* Tabxml C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABXML */
/* ------------- */
/* Version 3.0 */
/* */
/* Author Olivier BERTRAND 2007 - 2017 */
/* */
/* This program implements the XML table classes using MS-DOM or libxml2. */
/***********************************************************************/
/***********************************************************************/
/* Include required compiler header files. */
/***********************************************************************/
#include "my_global.h"
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#if defined(__WIN__)
#include <io.h>
#include <winsock2.h>
//#include <windows.h>
#include <comdef.h>
#else // !__WIN__
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>
//#include <ctype.h>
#include "osutil.h"
#define _O_RDONLY O_RDONLY
#endif // !__WIN__
#include "resource.h" // for IDS_COLUMNS
#define INCLUDE_TDBXML
#define NODE_TYPE_LIST
/***********************************************************************/
/* Include application header files: */
/* global.h is header containing all global declarations. */
/* plgdbsem.h is header containing the DB application declarations. */
/* tabdos.h is header containing the TABDOS class declarations. */
/***********************************************************************/
#include "global.h"
#include "plgdbsem.h"
//#include "reldef.h"
#include "xtable.h"
#include "colblk.h"
#include "mycat.h"
#include "xindex.h"
#include "plgxml.h"
#include "tabxml.h"
#include "tabmul.h"
extern "C" char version[];
#if defined(__WIN__) && defined(DOMDOC_SUPPORT)
#define XMLSUP "MS-DOM"
#else // !__WIN__
#define XMLSUP "libxml2"
#endif // !__WIN__
#define TYPE_UNKNOWN 12 /* Must be greater than other types */
#define XLEN(M) (sizeof(M) - strlen(M) - 1) /* To avoid overflow */
/***********************************************************************/
/* Class and structure used by XMLColumns. */
/***********************************************************************/
typedef class XMCOL *PXCL;
class XMCOL : public BLOCK {
public:
// Constructors
XMCOL(void) {Next = NULL;
Name[0] = 0;
Fmt = NULL;
Type = 1;
Len = Scale = 0;
Cbn = false;
Found = true;}
XMCOL(PGLOBAL g, PXCL xp, char *fmt, int i) {
Next = NULL;
strcpy(Name, xp->Name);
Fmt = (*fmt) ? PlugDup(g, fmt) : NULL;
Type = xp->Type;
Len = xp->Len;
Scale = xp->Scale;
Cbn = (xp->Cbn || i > 1);
Found = true;}
// Members
PXCL Next;
char Name[64];
char *Fmt;
int Type;
int Len;
int Scale;
bool Cbn;
bool Found;
}; // end of class XMCOL
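/***********************************************************************/
/* Level description block used by XMLColumns while walking the XML */
/* tree: pn = node of this level, nl = its child element list, atp = */
/* current attribute, b = level has usable (element) children, k = */
/* index into nl, n/m = saved lengths of the column name and format */
/* prefixes built so far. */
/***********************************************************************/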
typedef struct LVL {
PXNODE pn;
PXLIST nl;
PXATTR atp;
bool b;
long k;
int m, n;
} *PLVL;
/***********************************************************************/
/* XMLColumns: construct the result blocks containing the description */
/* of all the columns of a table contained inside an XML file. */
/***********************************************************************/
PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
{
static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT,
TYPE_INT, TYPE_SHORT, TYPE_SHORT, TYPE_STRING};
static XFLD fldtyp[] = {FLD_NAME, FLD_TYPE, FLD_TYPENAME, FLD_PREC,
FLD_LENGTH, FLD_SCALE, FLD_NULL, FLD_FORMAT};
static unsigned int length[] = {0, 6, 8, 10, 10, 6, 6, 0};
char colname[65], fmt[129], buf[512];
int i, j, lvl, n = 0;
int ncol = sizeof(buftyp) / sizeof(int);
bool ok = true;
PCSZ fn, op;
PXCL xcol, xcp, fxcp = NULL, pxcp = NULL;
PLVL *lvlp, vp;
PXNODE node = NULL;
PXMLDEF tdp;
PTDBXML txmp;
PQRYRES qrp;
PCOLRES crp;
if (info) {
length[0] = 128;
length[7] = 256;
goto skipit;
} // endif info
if (GetIntegerTableOption(g, topt, "Multiple", 0)) {
strcpy(g->Message, "Cannot find column definition for multiple table");
return NULL;
} // endif Multiple
/*********************************************************************/
/* Open the input file. */
/*********************************************************************/
if (!(fn = GetStringTableOption(g, topt, "Filename", NULL))) {
strcpy(g->Message, MSG(MISSING_FNAME));
return NULL;
} else {
lvl = GetIntegerTableOption(g, topt, "Level", 0);
lvl = (lvl < 0) ? 0 : (lvl > 16) ? 16 : lvl;
} // endif fn
if (trace(1))
htrc("File %s lvl=%d\n", topt->filename, lvl);
tdp = new(g) XMLDEF;
tdp->Fn = fn;
if (!(tdp->Database = SetPath(g, db)))
return NULL;
tdp->Tabname = tab;
tdp->Tabname = (char*)GetStringTableOption(g, topt, "Tabname", tab);
tdp->Rowname = (char*)GetStringTableOption(g, topt, "Rownode", NULL);
tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false);
tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL);
tdp->Skip = GetBooleanTableOption(g, topt, "Skipnull", false);
if (!(op = GetStringTableOption(g, topt, "Xmlsup", NULL)))
#if defined(__WIN__)
tdp->Usedom = true;
#else // !__WIN__
tdp->Usedom = false;
#endif // !__WIN__
else
tdp->Usedom = (toupper(*op) == 'M' || toupper(*op) == 'D');
txmp = new(g) TDBXML(tdp);
if (txmp->Initialize(g))
goto err;
xcol = new(g) XMCOL;
colname[64] = 0;
fmt[128] = 0;
lvlp = (PLVL*)PlugSubAlloc(g, NULL, sizeof(PLVL) * (lvl + 1));
for (j = 0; j <= lvl; j++)
lvlp[j] = (PLVL)PlugSubAlloc(g, NULL, sizeof(LVL));
/*********************************************************************/
/* Analyse the XML tree and define columns. */
/*********************************************************************/
for (i = 1; ; i++) {
// Get next row
switch (txmp->ReadDB(g)) {
case RC_EF:
vp = NULL;
break;
case RC_FX:
goto err;
default:
vp = lvlp[0];
vp->pn = txmp->RowNode;
vp->atp = vp->pn->GetAttribute(g, NULL);
vp->nl = vp->pn->GetChildElements(g);
vp->b = true;
vp->k = 0;
vp->m = vp->n = 0;
j = 0;
} // endswitch ReadDB
if (!vp)
break;
while (true) {
if (!vp->atp &&
!(node = (vp->nl) ? vp->nl->GetItem(g, vp->k++, tdp->Usedom ? node : NULL)
: NULL))
if (j) {
vp = lvlp[--j];
if (!tdp->Usedom) // nl was destroyed
vp->nl = vp->pn->GetChildElements(g);
if (!lvlp[j+1]->b) {
vp->k--;
ok = false;
} // endif b
continue;
} else
break;
xcol->Name[vp->n] = 0;
fmt[vp->m] = 0;
more:
if (vp->atp) {
strncpy(colname, vp->atp->GetName(g), sizeof(colname));
strncat(xcol->Name, colname, XLEN(xcol->Name));
switch (vp->atp->GetText(g, buf, sizeof(buf))) {
case RC_INFO:
PushWarning(g, txmp);
case RC_OK:
strncat(fmt, "@", XLEN(fmt));
break;
default:
goto err;
        } // endswitch rc
if (j)
strncat(fmt, colname, XLEN(fmt));
} else {
if (tdp->Usedom && node->GetType() != 1)
continue;
strncpy(colname, node->GetName(g), sizeof(colname));
strncat(xcol->Name, colname, XLEN(xcol->Name));
if (j)
strncat(fmt, colname, XLEN(fmt));
if (j < lvl && ok) {
vp = lvlp[j+1];
vp->k = 0;
vp->pn = node;
vp->atp = node->GetAttribute(g, NULL);
vp->nl = node->GetChildElements(g);
if (tdp->Usedom && vp->nl->GetLength() == 1) {
node = vp->nl->GetItem(g, 0, node);
          vp->b = (node->GetType() == 1); // Must be an element
} else
vp->b = (vp->nl && vp->nl->GetLength());
if (vp->atp || vp->b) {
if (!vp->atp)
node = vp->nl->GetItem(g, vp->k++, tdp->Usedom ? node : NULL);
if (!j)
strncat(fmt, colname, XLEN(fmt));
strncat(fmt, "/", XLEN(fmt));
strncat(xcol->Name, "_", XLEN(xcol->Name));
j++;
vp->n = (int)strlen(xcol->Name);
vp->m = (int)strlen(fmt);
goto more;
} else {
vp = lvlp[j];
if (!tdp->Usedom) // nl was destroyed
vp->nl = vp->pn->GetChildElements(g);
} // endif vp
} else
ok = true;
switch (node->GetContent(g, buf, sizeof(buf))) {
case RC_INFO:
PushWarning(g, txmp);
case RC_OK:
xcol->Cbn = !strlen(buf);
break;
default:
goto err;
      } // endswitch rc
} // endif atp;
xcol->Len = strlen(buf);
// Check whether this column was already found
for (xcp = fxcp; xcp; xcp = xcp->Next)
if (!strcmp(xcol->Name, xcp->Name))
break;
if (xcp) {
if (xcp->Type != xcol->Type)
xcp->Type = TYPE_STRING;
if (*fmt && (!xcp->Fmt || strlen(xcp->Fmt) < strlen(fmt))) {
xcp->Fmt = PlugDup(g, fmt);
length[7] = MY_MAX(length[7], strlen(fmt));
} // endif *fmt
xcp->Len = MY_MAX(xcp->Len, xcol->Len);
xcp->Scale = MY_MAX(xcp->Scale, xcol->Scale);
xcp->Cbn |= (xcol->Cbn || !xcol->Len);
xcp->Found = true;
} else if(xcol->Len || !tdp->Skip) {
// New column
xcp = new(g) XMCOL(g, xcol, fmt, i);
length[0] = MY_MAX(length[0], strlen(xcol->Name));
length[7] = MY_MAX(length[7], strlen(fmt));
if (pxcp) {
xcp->Next = pxcp->Next;
pxcp->Next = xcp;
} else
fxcp = xcp;
n++;
} // endif xcp
if (xcp)
pxcp = xcp;
if (vp->atp)
vp->atp = vp->atp->GetNext(g);
} // endwhile
// Missing column can be null
for (xcp = fxcp; xcp; xcp = xcp->Next) {
xcp->Cbn |= !xcp->Found;
xcp->Found = false;
} // endfor xcp
  } // endfor i
txmp->CloseDB(g);
skipit:
if (trace(1))
htrc("XMLColumns: n=%d len=%d\n", n, length[0]);
/*********************************************************************/
/* Allocate the structures used to refer to the result set. */
/*********************************************************************/
qrp = PlgAllocResult(g, ncol, n, IDS_COLUMNS + 3,
buftyp, fldtyp, length, false, false);
crp = qrp->Colresp->Next->Next->Next->Next->Next->Next;
crp->Name = "Nullable";
crp->Next->Name = "Xpath";
if (info || !qrp)
return qrp;
qrp->Nblin = n;
/*********************************************************************/
/* Now get the results into blocks. */
/*********************************************************************/
for (i = 0, xcp = fxcp; xcp; i++, xcp = xcp->Next) {
if (xcp->Type == TYPE_UNKNOWN) // Void column
xcp->Type = TYPE_STRING;
crp = qrp->Colresp; // Column Name
crp->Kdata->SetValue(xcp->Name, i);
crp = crp->Next; // Data Type
crp->Kdata->SetValue(xcp->Type, i);
crp = crp->Next; // Type Name
crp->Kdata->SetValue(GetTypeName(xcp->Type), i);
crp = crp->Next; // Precision
crp->Kdata->SetValue(xcp->Len, i);
crp = crp->Next; // Length
crp->Kdata->SetValue(xcp->Len, i);
crp = crp->Next; // Scale (precision)
crp->Kdata->SetValue(xcp->Scale, i);
crp = crp->Next; // Nullable
crp->Kdata->SetValue(xcp->Cbn ? 1 : 0, i);
crp = crp->Next; // Field format
if (crp->Kdata)
crp->Kdata->SetValue(xcp->Fmt, i);
} // endfor i
/*********************************************************************/
/* Return the result pointer. */
/*********************************************************************/
return qrp;
err:
txmp->CloseDB(g);
return NULL;
} // end of XMLColumns
/* -------------- Implementation of the XMLDEF class ---------------- */
/***********************************************************************/
/* Constructor. */
/***********************************************************************/
XMLDEF::XMLDEF(void)
{
Pseudo = 3;
Fn = NULL;
Encoding = NULL;
Tabname = NULL;
Rowname = NULL;
Colname = NULL;
Mulnode = NULL;
XmlDB = NULL;
Nslist = NULL;
DefNs = NULL;
Attrib = NULL;
Hdattr = NULL;
Entry = NULL;
Coltype = 1;
Limit = 0;
Header = 0;
Xpand = false;
Usedom = false;
Zipped = false;
Mulentries = false;
Skip = false;
} // end of XMLDEF constructor
/***********************************************************************/
/* DefineAM: define specific AM block values from XDB file. */
/***********************************************************************/
bool XMLDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
{
PCSZ defrow, defcol;
char buf[10];
Fn = GetStringCatInfo(g, "Filename", NULL);
Encoding = GetStringCatInfo(g, "Encoding", "UTF-8");
if (*Fn == '?') {
strcpy(g->Message, MSG(MISSING_FNAME));
return true;
} // endif fn
if ((signed)GetIntCatInfo("Flag", -1) != -1) {
strcpy(g->Message, MSG(DEPREC_FLAG));
return true;
} // endif flag
defrow = defcol = NULL;
GetCharCatInfo("Coltype", "", buf, sizeof(buf));
switch (toupper(*buf)) {
case 'A': // Attribute
case '@':
case '0':
Coltype = 0;
break;
case '\0': // Default
case 'T': // Tag
case 'N': // Node
case '1':
Coltype = 1;
break;
case 'C': // Column
case 'P': // Position
case 'H': // HTML
case '2':
Coltype = 2;
defrow = "TR";
defcol = "TD";
break;
default:
sprintf(g->Message, MSG(INV_COL_TYPE), buf);
return true;
} // endswitch typname
Tabname = GetStringCatInfo(g, "Name", Name); // Deprecated
Tabname = GetStringCatInfo(g, "Table_name", Tabname); // Deprecated
Tabname = GetStringCatInfo(g, "Tabname", Tabname);
Rowname = GetStringCatInfo(g, "Rownode", defrow);
Colname = GetStringCatInfo(g, "Colnode", defcol);
Mulnode = GetStringCatInfo(g, "Mulnode", NULL);
XmlDB = GetStringCatInfo(g, "XmlDB", NULL);
Nslist = GetStringCatInfo(g, "Nslist", NULL);
DefNs = GetStringCatInfo(g, "DefNs", NULL);
Limit = GetIntCatInfo("Limit", 10);
Xpand = GetBoolCatInfo("Expand", false);
Header = GetIntCatInfo("Header", 0);
GetCharCatInfo("Xmlsup", "*", buf, sizeof(buf));
// Note that if no support is specified, the default is MS-DOM
// on Windows and libxml2 otherwise
if (*buf == '*')
#if defined(__WIN__)
Usedom = true;
#else // !__WIN__
Usedom = false;
#endif // !__WIN__
else
Usedom = (toupper(*buf) == 'M' || toupper(*buf) == 'D');
// Get eventual table node attribute
Attrib = GetStringCatInfo(g, "Attribute", NULL);
Hdattr = GetStringCatInfo(g, "HeadAttr", NULL);
// Specific for zipped files
if ((Zipped = GetBoolCatInfo("Zipped", false)))
Mulentries = ((Entry = GetStringCatInfo(g, "Entry", NULL)))
? strchr(Entry, '*') || strchr(Entry, '?')
: GetBoolCatInfo("Mulentries", false);
return false;
} // end of DefineAM
/***********************************************************************/
/* GetTable: makes a new TDB of the proper type. */
/***********************************************************************/
PTDB XMLDEF::GetTable(PGLOBAL g, MODE m)
{
if (Catfunc == FNC_COL)
return new(g) TDBXCT(this);
if (Zipped && !(m == MODE_READ || m == MODE_ANY)) {
strcpy(g->Message, "ZIpped XML tables are read only");
return NULL;
} // endif Zipped
PTDBASE tdbp = new(g) TDBXML(this);
if (Multiple)
tdbp = new(g) TDBMUL(tdbp);
return tdbp;
} // end of GetTable
/* ------------------------- TDBXML Class ---------------------------- */
/***********************************************************************/
/* Implementation of the TDBXML constructor. */
/***********************************************************************/
TDBXML::TDBXML(PXMLDEF tdp) : TDBASE(tdp)
{
Docp = NULL;
Root = NULL;
Curp = NULL;
DBnode = NULL;
TabNode = NULL;
RowNode = NULL;
ColNode = NULL;
Nlist = NULL;
Clist = NULL;
To_Xb = NULL;
Colp = NULL;
Xfile = tdp->Fn;
Enc = tdp->Encoding;
Tabname = tdp->Tabname;
#if 0 // why all these?
Rowname = (tdp->Rowname) ? tdp->Rowname : NULL;
Colname = (tdp->Colname) ? tdp->Colname : NULL;
Mulnode = (tdp->Mulnode) ? tdp->Mulnode : NULL;
XmlDB = (tdp->XmlDB) ? tdp->XmlDB : NULL;
Nslist = (tdp->Nslist) ? tdp->Nslist : NULL;
DefNs = (tdp->DefNs) ? tdp->DefNs : NULL;
Attrib = (tdp->Attrib) ? tdp->Attrib : NULL;
Hdattr = (tdp->Hdattr) ? tdp->Hdattr : NULL;
#endif // 0
Rowname = tdp->Rowname;
Colname = tdp->Colname;
Mulnode = tdp->Mulnode;
XmlDB = tdp->XmlDB;
Nslist = tdp->Nslist;
DefNs = tdp->DefNs;
Attrib = tdp->Attrib;
Hdattr = tdp->Hdattr;
Entry = tdp->Entry;
Coltype = tdp->Coltype;
Limit = tdp->Limit;
Xpand = tdp->Xpand;
Zipped = tdp->Zipped;
Mulentries = tdp->Mulentries;
Changed = false;
Checked = false;
NextSame = false;
NewRow = false;
Hasnod = false;
Write = false;
Bufdone = false;
Nodedone = false;
Void = false;
Usedom = tdp->Usedom;
Header = tdp->Header;
Multiple = tdp->Multiple;
Nrow = -1;
Irow = Header - 1;
Nsub = 0;
N = 0;
} // end of TDBXML constructor
TDBXML::TDBXML(PTDBXML tdbp) : TDBASE(tdbp)
{
Docp = tdbp->Docp;
Root = tdbp->Root;
Curp = tdbp->Curp;
DBnode = tdbp->DBnode;
TabNode = tdbp->TabNode;
RowNode = tdbp->RowNode;
ColNode = tdbp->ColNode;
Nlist = tdbp->Nlist;
Clist = tdbp->Clist;
To_Xb = tdbp->To_Xb;
Colp = tdbp->Colp;
Xfile = tdbp->Xfile;
Enc = tdbp->Enc;
Tabname = tdbp->Tabname;
Rowname = tdbp->Rowname;
Colname = tdbp->Colname;
Mulnode = tdbp->Mulnode;
XmlDB = tdbp->XmlDB;
Nslist = tdbp->Nslist;
DefNs = tdbp->DefNs;
Attrib = tdbp->Attrib;
Hdattr = tdbp->Hdattr;
Entry = tdbp->Entry;
Coltype = tdbp->Coltype;
Limit = tdbp->Limit;
Xpand = tdbp->Xpand;
Zipped = tdbp->Zipped;
Mulentries = tdbp->Mulentries;
Changed = tdbp->Changed;
Checked = tdbp->Checked;
NextSame = tdbp->NextSame;
NewRow = tdbp->NewRow;
Hasnod = tdbp->Hasnod;
Write = tdbp->Write;
Void = tdbp->Void;
Usedom = tdbp->Usedom;
Header = tdbp->Header;
Multiple = tdbp->Multiple;
Nrow = tdbp->Nrow;
Irow = tdbp->Irow;
Nsub = tdbp->Nsub;
N = tdbp->N;
} // end of TDBXML copy constructor
// Used for update
PTDB TDBXML::Clone(PTABS t)
{
PTDB tp;
PXMLCOL cp1, cp2;
PGLOBAL g = t->G;
tp = new(g) TDBXML(this);
for (cp1 = (PXMLCOL)Columns; cp1; cp1 = (PXMLCOL)cp1->GetNext()) {
cp2 = new(g) XMLCOL(cp1, tp); // Make a copy
NewPointer(t, cp1, cp2);
} // endfor cp1
return tp;
} // end of Clone
/***********************************************************************/
/* Must not be in tabxml.h because of OEM tables */
/***********************************************************************/
const CHARSET_INFO *TDBXML::data_charset()
{
return &my_charset_utf8_general_ci;
} // end of data_charset
/***********************************************************************/
/* Allocate XML column description block. */
/***********************************************************************/
PCOL TDBXML::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
{
if (trace(1))
htrc("TDBXML: MakeCol %s n=%d\n", (cdp) ? cdp->GetName() : "<null>", n);
return new(g) XMLCOL(cdp, this, cprec, n);
} // end of MakeCol
/***********************************************************************/
/* InsertSpecialColumn: Put a special column ahead of the column list.*/
/***********************************************************************/
PCOL TDBXML::InsertSpecialColumn(PCOL colp)
{
if (!colp->IsSpecial())
return NULL;
//if (Xpand && ((SPCBLK*)colp)->GetRnm())
// colp->SetKey(0); // Rownum is no more a key
colp->SetNext(Columns);
Columns = colp;
return colp;
} // end of InsertSpecialColumn
/***********************************************************************/
/* LoadTableFile: Load and parse an XML file. */
/***********************************************************************/
int TDBXML::LoadTableFile(PGLOBAL g, char *filename)
{
int rc = RC_OK, type = (Usedom) ? TYPE_FB_XML : TYPE_FB_XML2;
PFBLOCK fp = NULL;
PDBUSER dup = (PDBUSER)g->Activityp->Aptr;
if (Docp)
return rc; // Already done
if (trace(1))
htrc("TDBXML: loading %s\n", filename);
/*********************************************************************/
  /* First, we check whether this file has already been loaded. */
/*********************************************************************/
if ((Mode == MODE_READ || Mode == MODE_ANY) && !Zipped)
for (fp = dup->Openlist; fp; fp = fp->Next)
if (fp->Type == type && fp->Length && fp->Count)
if (!stricmp(fp->Fname, filename))
break;
if (fp) {
/*******************************************************************/
/* File already loaded. Just increment use count and get pointer. */
/*******************************************************************/
fp->Count++;
Docp = (Usedom) ? GetDomDoc(g, Nslist, DefNs, Enc, fp)
: GetLibxmlDoc(g, Nslist, DefNs, Enc, fp);
} else {
/*******************************************************************/
/* Parse the XML file. */
/*******************************************************************/
if (!(Docp = (Usedom) ? GetDomDoc(g, Nslist, DefNs, Enc)
: GetLibxmlDoc(g, Nslist, DefNs, Enc)))
return RC_FX;
// Initialize the implementation
if (Docp->Initialize(g, Entry, Zipped)) {
sprintf(g->Message, MSG(INIT_FAILED), (Usedom) ? "DOM" : "libxml2");
return RC_FX;
} // endif init
if (trace(1))
htrc("TDBXML: parsing %s rc=%d\n", filename, rc);
// Parse the XML file
if (Docp->ParseFile(g, filename)) {
// Does the file exist?
int h= global_open(g, MSGID_NONE, filename, _O_RDONLY);
if (h != -1) {
rc = (!_filelength(h)) ? RC_EF : RC_INFO;
close(h);
} else
rc = (errno == ENOENT) ? RC_NF : RC_INFO;
      // Cannot make an Xblock until the document is made
return rc;
} // endif Docp
/*******************************************************************/
    /* Link an Xblock. This makes it possible to reuse already opened docs */
    /* and also to close them automatically in case of an error g->jump. */
/*******************************************************************/
fp = Docp->LinkXblock(g, Mode, rc, filename);
} // endif xp
To_Xb = fp; // Useful when closing
return rc;
} // end of LoadTableFile
/***********************************************************************/
/* Initialize the processing of the XML file. */
/* Note: this function can be called several times, possibly before */
/* the columns are known (from TBL for instance). */
/***********************************************************************/
bool TDBXML::Initialize(PGLOBAL g)
{
int rc;
PXMLCOL colp;
if (Void)
return false;
if (Columns && !Bufdone) {
// Allocate the buffers that will contain node values
for (colp = (PXMLCOL)Columns; colp; colp = (PXMLCOL)colp->GetNext())
if (!colp->IsSpecial()) // Not a pseudo column
if (colp->AllocBuf(g, Mode == MODE_INSERT))
return true;
Bufdone = true;
} // endif Bufdone
#if !defined(UNIX)
if (!Root) try {
#else
if (!Root) {
#endif
char tabpath[64], filename[_MAX_PATH];
    // We use the file name relative to the recorded datapath
PlugSetPath(filename, Xfile, GetPath());
// Load or re-use the table file
rc = LoadTableFile(g, filename);
if (rc == RC_OK) {
// Get root node
if (!(Root = Docp->GetRoot(g))) {
// This should never happen as load should have failed
strcpy(g->Message, MSG(EMPTY_DOC));
goto error;
} // endif Root
// If tabname is not an Xpath,
// construct one that will find it anywhere
if (!strchr(Tabname, '/'))
strcat(strcpy(tabpath, "//"), Tabname);
else
strcpy(tabpath, Tabname);
// Evaluate table xpath
if ((TabNode = Root->SelectSingleNode(g, tabpath))) {
if (TabNode->GetType() != XML_ELEMENT_NODE) {
sprintf(g->Message, MSG(BAD_NODE_TYPE), TabNode->GetType());
goto error;
} // endif Type
} else if (Mode == MODE_INSERT && XmlDB) {
// We are adding a new table to a multi-table file
// If XmlDB is not an Xpath,
// construct one that will find it anywhere
if (!strchr(XmlDB, '/'))
strcat(strcpy(tabpath, "//"), XmlDB);
else
strcpy(tabpath, XmlDB);
if (!(DBnode = Root->SelectSingleNode(g, tabpath))) {
// DB node does not exist yet; we cannot create it
// because we don't know where it should be placed
sprintf(g->Message, MSG(MISSING_NODE), XmlDB, Xfile);
goto error;
} // endif DBnode
if (!(TabNode = DBnode->AddChildNode(g, Tabname))) {
sprintf(g->Message, MSG(FAIL_ADD_NODE), Tabname);
goto error;
} // endif TabNode
DBnode->AddText(g, "\n");
} else {
TabNode = Root; // Try this ?
Tabname = TabNode->GetName(g);
} // endif's
} else if (rc == RC_NF || rc == RC_EF) {
// The XML file does not exist or is void
if (Mode == MODE_INSERT) {
// New Document
char buf[64];
// Create the XML node
if (Docp->NewDoc(g, "1.0")) {
strcpy(g->Message, MSG(NEW_DOC_FAILED));
goto error;
} // endif NewDoc
// Now we can link the Xblock
To_Xb = Docp->LinkXblock(g, Mode, rc, filename);
// Add a CONNECT comment node
strcpy(buf, " Created by the MariaDB CONNECT Storage Engine");
Docp->AddComment(g, buf);
if (XmlDB) {
// This is a multi-table file
DBnode = Root = Docp->NewRoot(g, XmlDB);
DBnode->AddText(g, "\n");
TabNode = DBnode->AddChildNode(g, Tabname);
DBnode->AddText(g, "\n");
} else
TabNode = Root = Docp->NewRoot(g, Tabname);
if (TabNode == NULL || Root == NULL) {
strcpy(g->Message, MSG(XML_INIT_ERROR));
goto error;
} else if (SetTabNode(g))
goto error;
} else {
sprintf(g->Message, MSG(FILE_UNFOUND), Xfile);
if (Mode == MODE_READ) {
PushWarning(g, this);
Void = true;
} // endif Mode
goto error;
} // endif Mode
} else if (rc == RC_INFO) {
// Loading failed
sprintf(g->Message, MSG(LOADING_FAILED), Xfile);
goto error;
} else // (rc == RC_FX)
goto error;
if (!Rowname) {
for (PXNODE n = TabNode->GetChild(g); n; n = n->GetNext(g))
if (n->GetType() == XML_ELEMENT_NODE) {
Rowname = n->GetName(g);
break;
} // endif Type
if (!Rowname)
Rowname = TabNode->GetName(g);
} // endif Rowname
// Get row node list
if (strcmp(Rowname, Tabname))
Nlist = TabNode->SelectNodes(g, Rowname);
else
Nrow = 1;
Docp->SetNofree(true); // For libxml2
#if defined(__WIN__)
} catch (_com_error e) {
// We come here if a DOM command threw an error
char buf[128];
rc = WideCharToMultiByte(CP_ACP, 0, e.Description(), -1,
buf, sizeof(buf), NULL, NULL);
if (rc)
sprintf(g->Message, "%s: %s", MSG(COM_ERROR), buf);
else
sprintf(g->Message, "%s hr=%x", MSG(COM_ERROR), e.Error());
goto error;
#endif // __WIN__
#if !defined(UNIX)
} catch(...) {
// Other errors
strcpy(g->Message, MSG(XMLTAB_INIT_ERR));
goto error;
#endif
} // end of try-catches
if (Root && Columns && (Multiple || !Nodedone)) {
// Allocate class nodes to avoid dynamic allocation
for (colp = (PXMLCOL)Columns; colp; colp = (PXMLCOL)colp->GetNext())
if (!colp->IsSpecial()) // Not a pseudo column
colp->AllocNodes(g, Docp);
Nodedone = true;
} // endif Nodedone
if (Nrow < 0)
Nrow = (Nlist) ? Nlist->GetLength() : 0;
// Init is Ok
return false;
error:
if (Docp)
Docp->CloseDoc(g, To_Xb);
return !Void;
} // end of Initialize
/***********************************************************************/
/* Set TabNode attributes or header. */
/***********************************************************************/
bool TDBXML::SetTabNode(PGLOBAL g)
{
assert(Mode == MODE_INSERT);
if (Attrib)
SetNodeAttr(g, Attrib, TabNode);
if (Header) {
PCOLDEF cdp;
PXNODE rn, cn;
if (Rowname) {
TabNode->AddText(g, "\n\t");
rn = TabNode->AddChildNode(g, Rowname, NULL);
} else {
strcpy(g->Message, MSG(NO_ROW_NODE));
return true;
} // endif Rowname
if (Hdattr)
SetNodeAttr(g, Hdattr, rn);
for (cdp = To_Def->GetCols(); cdp; cdp = cdp->GetNext()) {
rn->AddText(g, "\n\t\t");
cn = rn->AddChildNode(g, "TH", NULL);
cn->SetContent(g, (char *)cdp->GetName(),
strlen(cdp->GetName()) + 1);
} // endfor cdp
rn->AddText(g, "\n\t");
  } // endif Header
return false;
} // end of SetTabNode
/***********************************************************************/
/* Set attributes of a table or header node. */
/***********************************************************************/
void TDBXML::SetNodeAttr(PGLOBAL g, char *attr, PXNODE node)
{
char *p, *pa, *pn = attr;
PXATTR an;
do {
if ((p = strchr(pn, '='))) {
pa = pn;
*p++ = 0;
if ((pn = strchr(p, ';')))
*pn++ = 0;
an = node->AddProperty(g, pa, NULL);
an->SetText(g, p, strlen(p) + 1);
} else
break;
} while (pn);
} // end of SetNodeAttr
/***********************************************************************/
/* XML Cardinality: returns table cardinality in number of rows. */
/* This function can be called with a null argument to test the */
/* availability of Cardinality implementation (1 yes, 0 no). */
/***********************************************************************/
int TDBXML::Cardinality(PGLOBAL g)
{
if (!g)
return (Multiple || Xpand || Coltype == 2) ? 0 : 1;
if (Multiple)
return 10;
if (Nrow < 0)
if (Initialize(g))
return -1;
return (Void) ? 0 : Nrow - Header;
} // end of Cardinality
/***********************************************************************/
/* XML GetMaxSize: returns an estimate of the table size in rows. */
/***********************************************************************/
int TDBXML::GetMaxSize(PGLOBAL g)
{
if (MaxSize < 0) {
if (!Multiple)
MaxSize = Cardinality(g) * ((Xpand) ? Limit : 1);
else
MaxSize = 10;
} // endif MaxSize
return MaxSize;
} // end of GetMaxSize
/***********************************************************************/
/* Return the position in the table. */
/***********************************************************************/
int TDBXML::GetRecpos(void)
{
union {
uint Rpos;
BYTE Spos[4];
};
Rpos = htonl(Irow);
Spos[0] = (BYTE)Nsub;
return Rpos;
} // end of GetRecpos
/***********************************************************************/
/* RowNumber: return the ordinal number of the current row. */
/***********************************************************************/
int TDBXML::RowNumber(PGLOBAL g, bool b)
{
if (To_Kindex && (Xpand || Coltype == 2) && !b) {
/*******************************************************************/
/* Don't know how to retrieve RowID for expanded XML tables. */
/*******************************************************************/
sprintf(g->Message, MSG(NO_ROWID_FOR_AM),
GetAmName(g, GetAmType()));
return 0; // Means error
} else
return (b || !(Xpand || Coltype == 2)) ? Irow - Header + 1 : N;
} // end of RowNumber
/***********************************************************************/
/* XML Access Method opening routine. */
/***********************************************************************/
bool TDBXML::OpenDB(PGLOBAL g)
{
if (Use == USE_OPEN) {
/*******************************************************************/
/* Table already open replace it at its beginning. */
/*******************************************************************/
if (!To_Kindex) {
Irow = Header - 1;
Nsub = 0;
} else
/*****************************************************************/
/* Table is to be accessed through a sorted index table. */
/*****************************************************************/
To_Kindex->Reset();
return false;
} // endif use
/*********************************************************************/
/* OpenDB: initialize the XML file processing. */
/*********************************************************************/
Write = (Mode == MODE_INSERT || Mode == MODE_UPDATE);
if (Initialize(g))
return true;
NewRow = (Mode == MODE_INSERT);
Nsub = 0;
Use = USE_OPEN; // Do it now in case we are recursively called
return false;
} // end of OpenDB
/***********************************************************************/
/* Data Base read routine for XML access method. */
/***********************************************************************/
int TDBXML::ReadDB(PGLOBAL g)
{
bool same;
if (Void)
return RC_EF;
/*********************************************************************/
/* Now start the pseudo reading process. */
/*********************************************************************/
if (To_Kindex) {
/*******************************************************************/
/* Reading is by an index table. */
/*******************************************************************/
union {
uint Rpos;
BYTE Spos[4];
};
int recpos = To_Kindex->Fetch(g);
switch (recpos) {
case -1: // End of file reached
return RC_EF;
case -2: // No match for join
return RC_NF;
case -3: // Same record as last non null one
same = true;
return RC_OK;
default:
Rpos = recpos;
Nsub = Spos[0];
Spos[0] = 0;
if (Irow != (signed)ntohl(Rpos)) {
Irow = ntohl(Rpos);
same = false;
} else
same = true;
} // endswitch recpos
} else {
if (trace(1))
htrc("TDBXML ReadDB: Irow=%d Nrow=%d\n", Irow, Nrow);
// This is to force the table to be expanded when constructing
// an index for which the expand column is not specified.
if (Colp && Irow >= Header) {
Colp->Eval(g);
Colp->Reset();
} // endif Colp
if (!NextSame) {
if (++Irow == Nrow)
return RC_EF;
same = false;
Nsub = 0;
} else {
// Not sure the multiple column read will be called
NextSame = false;
same = true;
Nsub++;
} // endif NextSame
N++; // RowID
} // endif To_Kindex
if (!same) {
if (trace(2))
htrc("TDBXML ReadDB: Irow=%d RowNode=%p\n", Irow, RowNode);
// Get the new row node
if (Nlist) {
if ((RowNode = Nlist->GetItem(g, Irow, RowNode)) == NULL) {
sprintf(g->Message, MSG(MISSING_ROWNODE), Irow);
return RC_FX;
} // endif RowNode
} else
RowNode = TabNode;
if (Colname && Coltype == 2)
Clist = RowNode->SelectNodes(g, Colname, Clist);
} // endif same
return RC_OK;
} // end of ReadDB
/***********************************************************************/
/* CheckRow: called On Insert and Update. Must create the Row node */
/* if it does not exist (Insert) and update the Clist if called by */
/* a column having an Xpath because it can use an existing node that */
/* was added while inserting or Updating this row. */
/***********************************************************************/
bool TDBXML::CheckRow(PGLOBAL g, bool b)
{
if (NewRow && Mode == MODE_INSERT)
if (Rowname) {
TabNode->AddText(g, "\n\t");
RowNode = TabNode->AddChildNode(g, Rowname, RowNode);
} else {
strcpy(g->Message, MSG(NO_ROW_NODE));
return true;
} // endif Rowname
if (Colname && (NewRow || b))
Clist = RowNode->SelectNodes(g, Colname, Clist);
return NewRow = false;
} // end of CheckRow
/***********************************************************************/
/* WriteDB: Data Base write routine for XDB access methods. */
/***********************************************************************/
int TDBXML::WriteDB(PGLOBAL g)
{
if (Mode == MODE_INSERT) {
if (Hasnod)
RowNode->AddText(g, "\n\t");
NewRow = true;
} // endif Mode
// Something was changed in the document
Changed = true;
return RC_OK;
} // end of WriteDB
/***********************************************************************/
/* Data Base delete line routine for XDB access methods. */
/***********************************************************************/
int TDBXML::DeleteDB(PGLOBAL g, int irc)
{
// TODO: Handle null Nlist
if (irc == RC_FX) {
// Delete all rows
for (Irow = 0; Irow < Nrow; Irow++)
if ((RowNode = Nlist->GetItem(g, Irow, RowNode)) == NULL) {
sprintf(g->Message, MSG(MISSING_ROWNODE), Irow);
return RC_FX;
} else {
TabNode->DeleteChild(g, RowNode);
if (Nlist->DropItem(g, Irow))
return RC_FX;
} // endif RowNode
Changed = true;
} else if (irc != RC_EF) {
TabNode->DeleteChild(g, RowNode);
if (Nlist->DropItem(g, Irow))
return RC_FX;
Changed = true;
} // endif's irc
return RC_OK;
} // end of DeleteDB
/***********************************************************************/
/* Data Base close routine for XDB access methods. */
/***********************************************************************/
void TDBXML::CloseDB(PGLOBAL g)
{
if (Docp) {
if (Changed) {
char filename[_MAX_PATH];
      // We use the file name relative to the recorded datapath
PlugSetPath(filename, Xfile, GetPath());
if (Mode == MODE_INSERT)
TabNode->AddText(g, "\n");
// Save the modified document
if (Docp->DumpDoc(g, filename)) {
PushWarning(g, this);
Docp->CloseDoc(g, To_Xb);
// This causes a crash in Diagnostics_area::set_error_status
// throw (int)TYPE_AM_XML;
} // endif DumpDoc
} // endif Changed
// Free the document and terminate XML processing
Docp->CloseDoc(g, To_Xb);
} // endif docp
if (Multiple) {
// Reset all constants to start a new parse
Docp = NULL;
Root = NULL;
Curp = NULL;
DBnode = NULL;
TabNode = NULL;
RowNode = NULL;
ColNode = NULL;
Nlist = NULL;
Clist = NULL;
To_Xb = NULL;
Colp = NULL;
Changed = false;
Checked = false;
NextSame = false;
NewRow = false;
Hasnod = false;
Write = false;
Nodedone = false;
Void = false;
Nrow = -1;
Irow = Header - 1;
Nsub = 0;
N = 0;
} // endif Multiple
} // end of CloseDB
// ------------------------ XMLCOL functions ----------------------------
/***********************************************************************/
/* XMLCOL public constructor. */
/***********************************************************************/
XMLCOL::XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: COLBLK(cdp, tdbp, i)
{
if (cprec) {
Next = cprec->GetNext();
cprec->SetNext(this);
} else {
Next = tdbp->GetColumns();
tdbp->SetColumns(this);
} // endif cprec
// Set additional XML access method information for column.
Tdbp = (PTDBXML)tdbp;
Nl = NULL;
Nlx = NULL;
ColNode = NULL;
ValNode = NULL;
Cxnp = NULL;
Vxnp = NULL;
Vxap = NULL;
AttNode = NULL;
Nodes = NULL;
Nod = 0;
Inod = -1;
Mul = false;
Checked = false;
Xname = cdp->GetFmt();
Long = cdp->GetLong();
Rank = cdp->GetOffset();
Type = Tdbp->Coltype;
Nx = -1;
Sx = -1;
N = 0;
Valbuf = NULL;
To_Val = NULL;
} // end of XMLCOL constructor
/***********************************************************************/
/* XMLCOL constructor used for copying columns. */
/* tdbp is the pointer to the new table descriptor. */
/***********************************************************************/
XMLCOL::XMLCOL(XMLCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp)
{
Tdbp = col1->Tdbp;
Nl = col1->Nl;
Nlx = col1->Nlx;
ColNode = col1->ColNode;
ValNode = col1->ValNode;
Cxnp = col1->Cxnp;
Vxnp = col1->Vxnp;
Vxap = col1->Vxap;
AttNode = col1->AttNode;
Nodes = col1->Nodes;
Nod = col1->Nod;
Inod = col1->Inod;
Mul = col1->Mul;
Checked = col1->Checked;
Xname = col1->Xname;
Valbuf = col1->Valbuf;
Long = col1->Long;
Rank = col1->Rank;
Nx = col1->Nx;
Sx = col1->Sx;
N = col1->N;
Type = col1->Type;
To_Val = col1->To_Val;
} // end of XMLCOL copy constructor
/***********************************************************************/
/* Allocate a buffer of the proper size. */
/***********************************************************************/
bool XMLCOL::AllocBuf(PGLOBAL g, bool mode)
{
if (Valbuf)
return false; // Already done
return ParseXpath(g, mode);
} // end of AllocBuf
/***********************************************************************/
/* Parse the Xpath information, if any, that was passed. */
/* This information can be specified in the Xpath (or Fieldfmt) */
/* column option when creating the table. It makes it possible to */
/* indicate the position of the node corresponding to that column in */
/* an Xpath-like language (but not a truly compliant one). */
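/* For example (illustrative names only), an Xpath of "author/@id" */
/* designates the "id" attribute of the "author" child of a row node. */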
/***********************************************************************/
bool XMLCOL::ParseXpath(PGLOBAL g, bool mode)
{
char *p, *p2, *pbuf = NULL;
int i, n = 1, len = strlen(Name);
len += ((Tdbp->Colname) ? strlen(Tdbp->Colname) : 0);
len += ((Xname) ? strlen(Xname) : 0);
pbuf = (char*)PlugSubAlloc(g, NULL, len + 3);
*pbuf = '\0';
if (!mode)
    // Take care of a possible extra column node, a la HTML
if (Tdbp->Colname) {
sprintf(pbuf, Tdbp->Colname, Rank + ((Tdbp->Usedom) ? 0 : 1));
strcat(pbuf, "/");
} // endif Colname
if (Xname) {
if (Type == 2) {
sprintf(g->Message, MSG(BAD_COL_XPATH), Name, Tdbp->Name);
return true;
} else
strcat(pbuf, Xname);
if (trace(1))
htrc("XMLCOL: pbuf=%s\n", pbuf);
// For Update or Insert the Xpath must be analyzed
if (mode) {
for (i = 0, p = pbuf; (p = strchr(p, '/')); i++, p++)
Nod++; // One path node found
if (Nod)
Nodes = (char**)PlugSubAlloc(g, NULL, Nod * sizeof(char*));
} // endif mode
// Analyze the Xpath for this column
for (i = 0, p = pbuf; (p2 = strchr(p, '/')); i++, p = p2 + 1) {
if (Tdbp->Mulnode && !strncmp(p, Tdbp->Mulnode, p2 - p))
if (!Tdbp->Xpand && mode) {
strcpy(g->Message, MSG(CONCAT_SUBNODE));
return true;
} else
Inod = i; // Index of multiple node
if (mode) {
// For Update or Insert the Xpath must be explicit
if (strchr("@/.*", *p)) {
sprintf(g->Message, MSG(XPATH_NOT_SUPP), Name);
return true;
} else
Nodes[i] = p;
*p2 = '\0';
} // endif mode
} // endfor i, p
if (*p == '/' || *p == '.') {
sprintf(g->Message, MSG(XPATH_NOT_SUPP), Name);
return true;
} else if (*p == '@') {
p++; // Remove the @ if mode
Type = 0; // Column is an attribute
} else
Type = 1; // Column is a node
if (!*p)
strcpy(p, Name); // Xname is column name
if (Type && Tdbp->Mulnode && !strcmp(p, Tdbp->Mulnode))
Inod = Nod; // Index of multiple node
if (mode) // Prepare Xname
pbuf = p;
} else if (Type == 2) {
// HTML like table, columns are retrieved by position
new(this) XPOSCOL(Value); // Change the class of this column
Inod = -1;
} else if (Type == 0 && !mode) {
strcat(strcat(pbuf, "@"), Name);
} else { // Type == 1
if (Tdbp->Mulnode && !strcmp(Name, Tdbp->Mulnode))
Inod = 0; // Nod
strcat(pbuf, Name);
  } // endif's
if (Inod >= 0) {
Tdbp->Colp = this; // To force expand
if (Tdbp->Xpand)
n = Tdbp->Limit;
new(this) XMULCOL(Value); // Change the class of this column
} // endif Inod
Valbuf = (char*)PlugSubAlloc(g, NULL, n * (Long + 1));
for (i = 0; i < n; i++)
Valbuf[Long + (i * (Long + 1))] = '\0';
if (Type || Nod)
Tdbp->Hasnod = true;
if (trace(1))
htrc("XMLCOL: Xname=%s\n", pbuf);
// Save the calculated Xpath
Xname = pbuf;
return false;
} // end of ParseXpath
/***********************************************************************/
/* SetBuffer: prepare a column block for write operation. */
/***********************************************************************/
bool XMLCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
{
if (!(To_Val = value)) {
sprintf(g->Message, MSG(VALUE_ERROR), Name);
return true;
} else if (Buf_Type == value->GetType()) {
// Values are of the (good) column type
if (Buf_Type == TYPE_DATE) {
// If any of the date values is formatted
// output format must be set for the receiving table
if (GetDomain() || ((DTVAL *)value)->IsFormatted())
goto newval; // This will make a new value;
} else if (Buf_Type == TYPE_DOUBLE)
// Float values must be written with the correct (column) precision
// Note: maybe this should be forced by ShowValue instead of this ?
value->SetPrec(GetScale());
Value = value; // Directly access the external value
} else {
// Values are not of the (good) column type
if (check) {
sprintf(g->Message, MSG(TYPE_VALUE_ERR), Name,
GetTypeName(Buf_Type), GetTypeName(value->GetType()));
return true;
} // endif check
newval:
if (InitValue(g)) // Allocate the matching value block
return true;
} // endif's Value, Buf_Type
// Because Colblk's have been made from a copy of the original TDB in
// case of Update, we must reset them to point to the original one.
if (To_Tdb->GetOrig()) {
To_Tdb = (PTDB)To_Tdb->GetOrig();
Tdbp = (PTDBXML)To_Tdb; // Specific of XMLCOL
// Allocate the XML buffer
if (AllocBuf(g, true)) // In Write mode
return true;
} // endif GetOrig
// Set the Column
Status = (ok) ? BUF_EMPTY : BUF_NO;
return false;
} // end of SetBuffer
/***********************************************************************/
/* Alloc the nodes that will be used during the whole process. */
/***********************************************************************/
void XMLCOL::AllocNodes(PGLOBAL g, PXDOC dp)
{
Cxnp = dp->NewPnode(g);
Vxnp = dp->NewPnode(g);
Vxap = dp->NewPattr(g);
} // end of AllocNodes
/***********************************************************************/
/* ReadColumn: what this routine does is to access the column node */
/* from the corresponding table, extract from it the node text and */
/* convert it to the column type. */
/***********************************************************************/
void XMLCOL::ReadColumn(PGLOBAL g)
{
if (Nx == Tdbp->Irow)
return; // Same row as the last read
ValNode = Tdbp->RowNode->SelectSingleNode(g, Xname, Vxnp);
if (ValNode) {
if (ValNode->GetType() != XML_ELEMENT_NODE &&
ValNode->GetType() != XML_ATTRIBUTE_NODE) {
sprintf(g->Message, MSG(BAD_VALNODE), ValNode->GetType(), Name);
throw (int)TYPE_AM_XML;
} // endif type
// Get the Xname value from the XML file
switch (ValNode->GetContent(g, Valbuf, Long + 1)) {
case RC_OK:
break;
case RC_INFO:
PushWarning(g, Tdbp);
break;
default:
throw (int)TYPE_AM_XML;
} // endswitch
Value->SetValue_psz(Valbuf);
} else {
if (Nullable)
Value->SetNull(true);
Value->Reset(); // Null value
} // endif ValNode
Nx = Tdbp->Irow;
} // end of ReadColumn
/***********************************************************************/
/* WriteColumn: what this routine does is to access the last row of */
/* the corresponding table, and rewrite the content corresponding */
/* to this column node from the column buffer and type. */
/***********************************************************************/
void XMLCOL::WriteColumn(PGLOBAL g)
{
char *p, buf[16];
int done = 0;
int i, n, k = 0;
PXNODE TopNode = NULL;
if (trace(2))
htrc("XML WriteColumn: col %s R%d coluse=%.4X status=%.4X\n",
Name, Tdbp->GetTdb_No(), ColUse, Status);
/*********************************************************************/
/* Check whether this node must be written. */
/*********************************************************************/
if (Value != To_Val)
Value->SetValue_pval(To_Val, false); // Convert the updated value
/*********************************************************************/
/* If a check pass was done while updating, all node construction */
/* has already been done. */
/*********************************************************************/
if (Status && Tdbp->Checked && !Value->IsNull()) {
assert (ColNode != NULL);
assert ((Type ? (void *)ValNode : (void *)AttNode) != NULL);
goto fin;
} // endif Checked
/*********************************************************************/
/* On Insert, a Row node must be created for each row; */
/* For columns having an Xpath, the Clist must be updated. */
/*********************************************************************/
if (Tdbp->CheckRow(g, Nod || Tdbp->Colname))
throw (int)TYPE_AM_XML;
/*********************************************************************/
/* Null values are represented by no node. */
/*********************************************************************/
if (Value->IsNull())
return;
/*********************************************************************/
/* Find the column and value nodes to update or insert. */
/*********************************************************************/
if (Tdbp->Clist) {
n = Tdbp->Clist->GetLength();
ColNode = NULL;
} else {
n = 1;
ColNode = Tdbp->RowNode->Clone(g, ColNode);
} // endif Clist
ValNode = NULL;
for (i = 0; i < n; i++) {
if (Tdbp->Clist)
ColNode = Tdbp->Clist->GetItem(g, i, Cxnp);
/*******************************************************************/
/* Check whether an Xpath was provided to go to the column node. */
/*******************************************************************/
for (k = 0; k < Nod; k++)
if ((ColNode = ColNode->SelectSingleNode(g, Nodes[k], Cxnp)))
TopNode = ColNode;
else
break;
if (ColNode)
if (Type)
ValNode = ColNode->SelectSingleNode(g, Xname, Vxnp);
else
AttNode = ColNode->GetAttribute(g, Xname, Vxap);
if (TopNode || ValNode || AttNode)
break; // We found the good column
else if (Tdbp->Clist)
ColNode = NULL;
} // endfor i
/*********************************************************************/
/* Create missing nodes. */
/*********************************************************************/
if (ColNode == NULL) {
if (TopNode == NULL)
if (Tdbp->Clist) {
Tdbp->RowNode->AddText(g, "\n\t\t");
ColNode = Tdbp->RowNode->AddChildNode(g, Tdbp->Colname);
done = 2;
TopNode = ColNode;
} else
TopNode = Tdbp->RowNode;
for (; k < Nod && TopNode; k++) {
if (!done) {
TopNode->AddText(g, "\n\t\t");
done = 1;
} // endif done
ColNode = TopNode->AddChildNode(g, Nodes[k], Cxnp);
TopNode = ColNode;
} // endfor k
if (ColNode == NULL) {
strcpy(g->Message, MSG(COL_ALLOC_ERR));
throw (int)TYPE_AM_XML;
} // endif ColNode
} // endif ColNode
if (Type == 1) {
if (ValNode == NULL) {
if (done < 2)
ColNode->AddText(g, "\n\t\t");
ValNode = ColNode->AddChildNode(g, Xname, Vxnp);
} // endif ValNode
} else // (Type == 0)
if (AttNode == NULL)
AttNode = ColNode->AddProperty(g, Xname, Vxap);
if (ValNode == NULL && AttNode == NULL) {
strcpy(g->Message, MSG(VAL_ALLOC_ERR));
throw (int)TYPE_AM_XML;
} // endif ValNode
/*********************************************************************/
/* Get the string representation of Value according to column type. */
/*********************************************************************/
p = Value->GetCharString(buf);
if (strlen(p) > (unsigned)Long) {
sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long);
throw (int)TYPE_AM_XML;
} else
strcpy(Valbuf, p);
/*********************************************************************/
/* Updating must be done only when not in checking pass. */
/*********************************************************************/
fin:
if (Status) {
if (Type) {
ValNode->SetContent(g, Valbuf, Long);
} else
AttNode->SetText(g, Valbuf, Long);
} // endif Status
} // end of WriteColumn
// ------------------------ XMULCOL functions ---------------------------
/***********************************************************************/
/* ReadColumn: what this routine does is to access the column node */
/* from the corresponding table, extract from it the node text and */
/* convert it to the column type. */
/***********************************************************************/
void XMULCOL::ReadColumn(PGLOBAL g)
{
char *p;
int i, len;
bool b = Tdbp->Xpand;
if (Nx != Tdbp->Irow) { // New row
Nl = Tdbp->RowNode->SelectNodes(g, Xname, Nl);
if ((N = Nl->GetLength())) {
*(p = Valbuf) = '\0';
len = Long;
if (N > Tdbp->Limit) {
N = Tdbp->Limit;
sprintf(g->Message, "Multiple values limited to %d", Tdbp->Limit);
PushWarning(g, Tdbp);
} // endif N
for (i = 0; i < N; i++) {
ValNode = Nl->GetItem(g, i, Vxnp);
if (ValNode->GetType() != XML_ELEMENT_NODE &&
ValNode->GetType() != XML_ATTRIBUTE_NODE) {
sprintf(g->Message, MSG(BAD_VALNODE), ValNode->GetType(), Name);
throw (int)TYPE_AM_XML;
} // endif type
// Get the Xname value from the XML file
switch (ValNode->GetContent(g, p, (b ? Long : len))) {
case RC_OK:
break;
case RC_INFO:
PushWarning(g, Tdbp);
break;
default:
throw (int)TYPE_AM_XML;
} // endswitch
if (!b) {
// Concatenate all values
if (N - i > 1)
strncat(Valbuf, ", ", len - strlen(p));
if ((len -= strlen(p)) <= 0)
break;
p += strlen(p);
} else // Xpand
p += (Long + 1);
} // endfor i
Value->SetValue_psz(Valbuf);
} else {
if (Nullable)
Value->SetNull(true);
Value->Reset(); // Null value
} // endif ValNode
} else if (Sx == Tdbp->Nsub)
return; // Same row
else // Expanded value
Value->SetValue_psz(Valbuf + (Tdbp->Nsub * (Long + 1)));
Nx = Tdbp->Irow;
Sx = Tdbp->Nsub;
Tdbp->NextSame = (Tdbp->Xpand && N - Sx > 1);
} // end of ReadColumn
/***********************************************************************/
/* WriteColumn: what this routine does is to access the last line */
/* read from the corresponding table, and rewrite the field */
/* corresponding to this column from the column buffer and type. */
/***********************************************************************/
void XMULCOL::WriteColumn(PGLOBAL g)
{
char *p, buf[16];
int done = 0;
int i, n, len, k = 0;
PXNODE TopNode = NULL;
if (trace(1))
htrc("XML WriteColumn: col %s R%d coluse=%.4X status=%.4X\n",
Name, Tdbp->GetTdb_No(), ColUse, Status);
/*********************************************************************/
/* Check whether this node must be written. */
/*********************************************************************/
if (Value != To_Val)
Value->SetValue_pval(To_Val, false); // Convert the updated value
if (Value->IsNull())
return;
/*********************************************************************/
/* If a check pass was done while updating, all node construction */
/* has already been done. */
/*********************************************************************/
if (Status && Tdbp->Checked) {
assert (ColNode);
assert ((Type ? (void *)ValNode : (void *)AttNode) != NULL);
goto fin;
} // endif Checked
/*********************************************************************/
/* On Insert, a Row node must be created for each row; */
/* For columns having an Xpath, the Clist must be updated. */
/*********************************************************************/
if (Tdbp->CheckRow(g, Nod))
throw (int)TYPE_AM_XML;
/*********************************************************************/
/* Find the column and value nodes to update or insert. */
/*********************************************************************/
if (Tdbp->Clist) {
n = Tdbp->Clist->GetLength();
ColNode = NULL;
} else {
n = 1;
ColNode = Tdbp->RowNode->Clone(g, ColNode);
} // endif Clist
ValNode = NULL;
for (i = 0; i < n; i++) {
if (Tdbp->Clist)
ColNode = Tdbp->Clist->GetItem(g, i, Cxnp);
/*******************************************************************/
/* Check whether an Xpath was provided to go to the column node. */
/*******************************************************************/
for (k = 0; k < Nod; k++) {
if (k == Inod) {
// This is the multiple node
Nlx = ColNode->SelectNodes(g, Nodes[k], Nlx);
ColNode = Nlx->GetItem(g, Tdbp->Nsub, Cxnp);
} else
ColNode = ColNode->SelectSingleNode(g, Nodes[k], Cxnp);
if (ColNode == NULL)
break;
TopNode = ColNode;
} // endfor k
if (ColNode)
if (Inod == Nod) {
/***************************************************************/
/* The node value can be multiple. */
/***************************************************************/
assert (Type);
// Get the value Node from the XML list
Nlx = ColNode->SelectNodes(g, Xname, Nlx);
len = Nlx->GetLength();
if (len > 1 && !Tdbp->Xpand) {
sprintf(g->Message, MSG(BAD_VAL_UPDATE), Name);
throw (int)TYPE_AM_XML;
} else
ValNode = Nlx->GetItem(g, Tdbp->Nsub, Vxnp);
} else // Inod != Nod
if (Type)
ValNode = ColNode->SelectSingleNode(g, Xname, Vxnp);
else
AttNode = ColNode->GetAttribute(g, Xname, Vxap);
if (TopNode || ValNode || AttNode)
break; // We found the good column
else if (Tdbp->Clist)
ColNode = NULL;
} // endfor i
/*********************************************************************/
/* Create missing nodes. */
/*********************************************************************/
if (ColNode == NULL) {
if (TopNode == NULL)
if (Tdbp->Clist) {
Tdbp->RowNode->AddText(g, "\n\t\t");
ColNode = Tdbp->RowNode->AddChildNode(g, Tdbp->Colname);
done = 2;
TopNode = ColNode;
} else
TopNode = Tdbp->RowNode;
for (; k < Nod && TopNode; k++) {
if (!done) {
TopNode->AddText(g, "\n\t\t");
done = 1;
} // endif done
ColNode = TopNode->AddChildNode(g, Nodes[k], Cxnp);
TopNode = ColNode;
} // endfor k
if (ColNode == NULL) {
strcpy(g->Message, MSG(COL_ALLOC_ERR));
throw (int)TYPE_AM_XML;
} // endif ColNode
} // endif ColNode
if (Type == 1) {
if (ValNode == NULL) {
if (done < 2)
ColNode->AddText(g, "\n\t\t");
ValNode = ColNode->AddChildNode(g, Xname, Vxnp);
} // endif ValNode
} else // (Type == 0)
if (AttNode == NULL)
AttNode = ColNode->AddProperty(g, Xname, Vxap);
if (ValNode == NULL && AttNode == NULL) {
strcpy(g->Message, MSG(VAL_ALLOC_ERR));
throw (int)TYPE_AM_XML;
} // endif ValNode
/*********************************************************************/
/* Get the string representation of Value according to column type. */
/*********************************************************************/
p = Value->GetCharString(buf);
if (strlen(p) > (unsigned)Long) {
sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long);
throw (int)TYPE_AM_XML;
} else
strcpy(Valbuf, p);
/*********************************************************************/
/* Updating must be done only when not in checking pass. */
/*********************************************************************/
fin:
if (Status) {
if (Type) {
ValNode->SetContent(g, Valbuf, Long);
} else
AttNode->SetText(g, Valbuf, Long);
} // endif Status
} // end of WriteColumn
/* ------------------------ XPOSCOL functions ------------------------ */
/***********************************************************************/
/* ReadColumn: what this routine does is to access the column node */
/* from the corresponding table, extract from it the node text and */
/* convert it to the column type. */
/***********************************************************************/
void XPOSCOL::ReadColumn(PGLOBAL g)
{
if (Nx == Tdbp->Irow)
return; // Same row as the last read
if (Tdbp->Clist == NULL) {
strcpy(g->Message, MSG(MIS_TAG_LIST));
throw (int)TYPE_AM_XML;
} // endif Clist
if ((ValNode = Tdbp->Clist->GetItem(g, Rank, Vxnp))) {
// Get the column value from the XML file
switch (ValNode->GetContent(g, Valbuf, Long + 1)) {
case RC_OK:
break;
case RC_INFO:
PushWarning(g, Tdbp);
break;
default:
throw (int)TYPE_AM_XML;
} // endswitch
Value->SetValue_psz(Valbuf);
} else {
if (Nullable)
Value->SetNull(true);
Value->Reset(); // Null value
} // endif ValNode
Nx = Tdbp->Irow;
} // end of ReadColumn
/***********************************************************************/
/* WriteColumn: what this routine does is to access the last line */
/* read from the corresponding table, and rewrite the field */
/* corresponding to this column from the column buffer and type. */
/***********************************************************************/
void XPOSCOL::WriteColumn(PGLOBAL g)
{
char *p, buf[16];
int i, k, n;
if (trace(1))
htrc("XML WriteColumn: col %s R%d coluse=%.4X status=%.4X\n",
Name, Tdbp->GetTdb_No(), ColUse, Status);
/*********************************************************************/
/* Check whether this node must be written. */
/*********************************************************************/
if (Value != To_Val)
Value->SetValue_pval(To_Val, false); // Convert the updated value
if (Value->IsNull())
return;
/*********************************************************************/
/* If a check pass was done while updating, all node construction */
/* has already been done. */
/*********************************************************************/
if (Status && Tdbp->Checked) {
assert (ValNode);
goto fin;
} // endif Checked
/*********************************************************************/
/* On Insert, a Row node must be created for each row; */
/* For all columns the Clist must be updated. */
/*********************************************************************/
if (Tdbp->CheckRow(g, true))
throw (int)TYPE_AM_XML;
/*********************************************************************/
/* Find the column and value nodes to update or insert. */
/*********************************************************************/
if (Tdbp->Clist == NULL) {
strcpy(g->Message, MSG(MIS_TAG_LIST));
throw (int)TYPE_AM_XML;
} // endif Clist
n = Tdbp->Clist->GetLength();
k = Rank;
if (!(ValNode = Tdbp->Clist->GetItem(g, k, Vxnp))) {
/*******************************************************************/
/* Create missing column nodes. */
/*******************************************************************/
Tdbp->RowNode->AddText(g, "\n\t\t");
for (i = n; i <= k; i++)
ValNode = Tdbp->RowNode->AddChildNode(g, Tdbp->Colname, Vxnp);
assert (ValNode);
} // endif ValNode
/*********************************************************************/
/* Get the string representation of Value according to column type. */
/*********************************************************************/
p = Value->GetCharString(buf);
if (strlen(p) > (unsigned)Long) {
sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long);
throw (int)TYPE_AM_XML;
} else
strcpy(Valbuf, p);
/*********************************************************************/
/* Updating must be done only when not in checking pass. */
/*********************************************************************/
fin:
if (Status)
ValNode->SetContent(g, Valbuf, Long);
} // end of WriteColumn
/* ---------------------------TDBXCT class --------------------------- */
/***********************************************************************/
/* TDBXCT class constructor. */
/***********************************************************************/
TDBXCT::TDBXCT(PXMLDEF tdp) : TDBCAT(tdp)
{
Topt = tdp->GetTopt();
//Db = (char*)tdp->GetDB();
Db = (char*)tdp->Schema;
Tabn = tdp->Tabname;
} // end of TDBXCT constructor
/***********************************************************************/
/* GetResult: Get the list of the XML file columns. */
/***********************************************************************/
PQRYRES TDBXCT::GetResult(PGLOBAL g)
{
return XMLColumns(g, Db, Tabn, Topt, false);
} // end of GetResult
/* ------------------------ End of Tabxml ---------------------------- */
| 1 | 14,172 | Again, would be awesome if you could remove the tabs in these 3-4 lines as well. | MariaDB-server | cpp |
@@ -234,9 +234,14 @@ util::json::Object makeRouteStep(guidance::RouteStep step, util::json::Value geo
util::json::Object route_step;
route_step.values["distance"] = std::round(step.distance * 10) / 10.;
route_step.values["duration"] = std::round(step.duration * 10) / 10.;
- route_step.values["name"] = std::move(step.name);
- if (!step.ref.empty())
+ route_step.values["way_name"] = std::move(step.name);
+ if (step.ref.empty())
+ route.step.values["name"] = std::move(step.name);
+ else
+ {
+ route_step.values["name"] = std::move(step.name) << " (" << std::move(step.ref) << ")";
route_step.values["ref"] = std::move(step.ref);
+ }
if (!step.pronunciation.empty())
route_step.values["pronunciation"] = std::move(step.pronunciation);
if (!step.destinations.empty()) | 1 | #include "engine/api/json_factory.hpp"
#include "engine/hint.hpp"
#include "engine/polyline_compressor.hpp"
#include "util/integer_range.hpp"
#include "util/guidance/bearing_class.hpp"
#include "util/guidance/entry_class.hpp"
#include "util/guidance/toolkit.hpp"
#include "util/typedefs.hpp"
#include <boost/assert.hpp>
#include <boost/optional.hpp>
#include <algorithm>
#include <iterator>
#include <string>
#include <utility>
#include <vector>
namespace TurnType = osrm::extractor::guidance::TurnType;
namespace DirectionModifier = osrm::extractor::guidance::DirectionModifier;
using TurnInstruction = osrm::extractor::guidance::TurnInstruction;
namespace osrm
{
namespace engine
{
namespace api
{
namespace json
{
namespace detail
{
const constexpr char *modifier_names[] = {"uturn",
"sharp right",
"right",
"slight right",
"straight",
"slight left",
"left",
"sharp left"};
// translations of TurnTypes. Not all types are exposed to the outside world.
// invalid types should never be returned as part of the API
const constexpr char *turn_type_names[] = {
"invalid", "new name", "continue", "turn", "merge",
"on ramp", "off ramp", "fork", "end of road", "notification",
"roundabout", "roundabout", "rotary", "rotary", "roundabout turn",
"roundabout turn", "use lane", "invalid", "invalid", "invalid",
"invalid", "invalid", "invalid", "invalid", "invalid",
"invalid", "invalid"};
const constexpr char *waypoint_type_names[] = {"invalid", "arrive", "depart"};
// Check whether to include a modifier in the result of the API
inline bool isValidModifier(const guidance::StepManeuver maneuver)
{
return (maneuver.waypoint_type == guidance::WaypointType::None ||
maneuver.instruction.direction_modifier != DirectionModifier::UTurn);
}
inline bool hasValidLanes(const guidance::Intersection &intersection)
{
return intersection.lanes.lanes_in_turn > 0;
}
std::string instructionTypeToString(const TurnType::Enum type)
{
static_assert(sizeof(turn_type_names) / sizeof(turn_type_names[0]) >= TurnType::MaxTurnType,
"Some turn types has not string representation.");
return turn_type_names[static_cast<std::size_t>(type)];
}
util::json::Array lanesFromIntersection(const guidance::Intersection &intersection)
{
BOOST_ASSERT(intersection.lanes.lanes_in_turn >= 1);
util::json::Array result;
LaneID lane_id = intersection.lane_description.size();
for (const auto &lane_desc : intersection.lane_description)
{
--lane_id;
util::json::Object lane;
lane.values["indications"] = extractor::guidance::TurnLaneType::toJsonArray(lane_desc);
if (lane_id >= intersection.lanes.first_lane_from_the_right &&
lane_id <
intersection.lanes.first_lane_from_the_right + intersection.lanes.lanes_in_turn)
lane.values["valid"] = util::json::True();
else
lane.values["valid"] = util::json::False();
result.values.push_back(lane);
}
return result;
}
std::string instructionModifierToString(const DirectionModifier::Enum modifier)
{
static_assert(sizeof(modifier_names) / sizeof(modifier_names[0]) >=
DirectionModifier::MaxDirectionModifier,
"Some direction modifiers has not string representation.");
return modifier_names[static_cast<std::size_t>(modifier)];
}
std::string waypointTypeToString(const guidance::WaypointType waypoint_type)
{
static_assert(sizeof(waypoint_type_names) / sizeof(waypoint_type_names[0]) >=
static_cast<size_t>(guidance::WaypointType::MaxWaypointType),
"Some waypoint types has not string representation.");
return waypoint_type_names[static_cast<std::size_t>(waypoint_type)];
}
util::json::Array coordinateToLonLat(const util::Coordinate coordinate)
{
util::json::Array array;
array.values.push_back(static_cast<double>(toFloating(coordinate.lon)));
array.values.push_back(static_cast<double>(toFloating(coordinate.lat)));
return array;
}
// FIXME this actually needs to be configurable from the profiles
std::string modeToString(const extractor::TravelMode mode)
{
std::string token;
switch (mode)
{
case TRAVEL_MODE_INACCESSIBLE:
token = "inaccessible";
break;
case TRAVEL_MODE_DRIVING:
token = "driving";
break;
case TRAVEL_MODE_CYCLING:
token = "cycling";
break;
case TRAVEL_MODE_WALKING:
token = "walking";
break;
case TRAVEL_MODE_FERRY:
token = "ferry";
break;
case TRAVEL_MODE_TRAIN:
token = "train";
break;
case TRAVEL_MODE_PUSHING_BIKE:
token = "pushing bike";
break;
case TRAVEL_MODE_STEPS_UP:
token = "steps up";
break;
case TRAVEL_MODE_STEPS_DOWN:
token = "steps down";
break;
case TRAVEL_MODE_RIVER_UP:
token = "river upstream";
break;
case TRAVEL_MODE_RIVER_DOWN:
token = "river downstream";
break;
case TRAVEL_MODE_ROUTE:
token = "route";
break;
default:
token = "other";
break;
}
return token;
}
} // namespace detail
util::json::Object makeStepManeuver(const guidance::StepManeuver &maneuver)
{
util::json::Object step_maneuver;
if (maneuver.waypoint_type == guidance::WaypointType::None)
step_maneuver.values["type"] = detail::instructionTypeToString(maneuver.instruction.type);
else
step_maneuver.values["type"] = detail::waypointTypeToString(maneuver.waypoint_type);
if (detail::isValidModifier(maneuver))
step_maneuver.values["modifier"] =
detail::instructionModifierToString(maneuver.instruction.direction_modifier);
step_maneuver.values["location"] = detail::coordinateToLonLat(maneuver.location);
step_maneuver.values["bearing_before"] = std::round(maneuver.bearing_before);
step_maneuver.values["bearing_after"] = std::round(maneuver.bearing_after);
if (maneuver.exit != 0)
step_maneuver.values["exit"] = maneuver.exit;
return step_maneuver;
}
util::json::Object makeIntersection(const guidance::Intersection &intersection)
{
util::json::Object result;
util::json::Array bearings;
util::json::Array entry;
bearings.values.reserve(intersection.bearings.size());
std::copy(intersection.bearings.begin(),
intersection.bearings.end(),
std::back_inserter(bearings.values));
entry.values.reserve(intersection.entry.size());
std::transform(intersection.entry.begin(),
intersection.entry.end(),
std::back_inserter(entry.values),
[](const bool has_entry) -> util::json::Value {
if (has_entry)
return util::json::True();
else
return util::json::False();
});
result.values["location"] = detail::coordinateToLonLat(intersection.location);
result.values["bearings"] = bearings;
result.values["entry"] = entry;
if (intersection.in != guidance::Intersection::NO_INDEX)
result.values["in"] = intersection.in;
if (intersection.out != guidance::Intersection::NO_INDEX)
result.values["out"] = intersection.out;
if (detail::hasValidLanes(intersection))
result.values["lanes"] = detail::lanesFromIntersection(intersection);
return result;
}
util::json::Object makeRouteStep(guidance::RouteStep step, util::json::Value geometry)
{
util::json::Object route_step;
route_step.values["distance"] = std::round(step.distance * 10) / 10.;
route_step.values["duration"] = std::round(step.duration * 10) / 10.;
route_step.values["name"] = std::move(step.name);
if (!step.ref.empty())
route_step.values["ref"] = std::move(step.ref);
if (!step.pronunciation.empty())
route_step.values["pronunciation"] = std::move(step.pronunciation);
if (!step.destinations.empty())
route_step.values["destinations"] = std::move(step.destinations);
if (!step.rotary_name.empty())
{
route_step.values["rotary_name"] = std::move(step.rotary_name);
if (!step.rotary_pronunciation.empty())
{
route_step.values["rotary_pronunciation"] = std::move(step.rotary_pronunciation);
}
}
route_step.values["mode"] = detail::modeToString(std::move(step.mode));
route_step.values["maneuver"] = makeStepManeuver(std::move(step.maneuver));
route_step.values["geometry"] = std::move(geometry);
util::json::Array intersections;
intersections.values.reserve(step.intersections.size());
std::transform(step.intersections.begin(),
step.intersections.end(),
std::back_inserter(intersections.values),
makeIntersection);
route_step.values["intersections"] = std::move(intersections);
return route_step;
}
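// A minimal illustrative sketch (hypothetical helper, not used above): if a
// combined "name (ref)" label is wanted, copy step.name before moving from it,
// so that every string is moved only once, at its last use.
inline util::json::Object makeCombinedNameSketch(guidance::RouteStep step)
{
    util::json::Object route_step;
    std::string combined = step.name;                      // copy; step.name is still intact
    if (!step.ref.empty())
        combined += " (" + step.ref + ")";
    route_step.values["way_name"] = std::move(step.name);  // last use of step.name
    route_step.values["name"] = std::move(combined);
    if (!step.ref.empty())
        route_step.values["ref"] = std::move(step.ref);    // last use of step.ref
    return route_step;
}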
util::json::Object makeRoute(const guidance::Route &route,
util::json::Array legs,
boost::optional<util::json::Value> geometry)
{
util::json::Object json_route;
json_route.values["distance"] = std::round(route.distance * 10) / 10.;
json_route.values["duration"] = std::round(route.duration * 10) / 10.;
json_route.values["legs"] = std::move(legs);
if (geometry)
{
json_route.values["geometry"] = *std::move(geometry);
}
return json_route;
}
util::json::Object makeWaypoint(const util::Coordinate location, std::string name, const Hint &hint)
{
util::json::Object waypoint;
waypoint.values["location"] = detail::coordinateToLonLat(location);
waypoint.values["name"] = std::move(name);
waypoint.values["hint"] = hint.ToBase64();
return waypoint;
}
util::json::Object makeRouteLeg(guidance::RouteLeg leg, util::json::Array steps)
{
util::json::Object route_leg;
route_leg.values["distance"] = std::round(leg.distance * 10) / 10.;
route_leg.values["duration"] = std::round(leg.duration * 10) / 10.;
route_leg.values["summary"] = std::move(leg.summary);
route_leg.values["steps"] = std::move(steps);
return route_leg;
}
util::json::Object
makeRouteLeg(guidance::RouteLeg leg, util::json::Array steps, util::json::Object annotation)
{
util::json::Object route_leg = makeRouteLeg(std::move(leg), std::move(steps));
route_leg.values["annotation"] = std::move(annotation);
return route_leg;
}
util::json::Array makeRouteLegs(std::vector<guidance::RouteLeg> legs,
std::vector<util::json::Value> step_geometries,
std::vector<util::json::Object> annotations)
{
util::json::Array json_legs;
auto step_geometry_iter = step_geometries.begin();
for (const auto idx : util::irange<std::size_t>(0UL, legs.size()))
{
auto leg = std::move(legs[idx]);
util::json::Array json_steps;
json_steps.values.reserve(leg.steps.size());
std::transform(std::make_move_iterator(leg.steps.begin()),
std::make_move_iterator(leg.steps.end()),
std::back_inserter(json_steps.values),
[&step_geometry_iter](guidance::RouteStep step) {
return makeRouteStep(std::move(step), std::move(*step_geometry_iter++));
});
if (annotations.size() > 0)
{
json_legs.values.push_back(
makeRouteLeg(std::move(leg), std::move(json_steps), annotations[idx]));
}
else
{
json_legs.values.push_back(makeRouteLeg(std::move(leg), std::move(json_steps)));
}
}
return json_legs;
}
} // namespace json
} // namespace api
} // namespace engine
} // namespace osrm
| 1 | 17,866 | You're moving multiple times from `step.name`; you're not allowed to do that. Instead you have to copy the `step.name` string and (if you want to) move once at the very last. | Project-OSRM-osrm-backend | cpp |
@@ -124,15 +124,15 @@ app.controller('EditorController', ['$scope', 'TimezoneService', 'AutoCompletion
$scope.validate = function() {
var error = false;
if ($scope.properties.summary === null || $scope.properties.summary.value.trim() === '') {
- OC.Notification.showTemporary(t('calendar', 'Please add a title!'));
+ OC.Notification.showTemporary(t('calendar', 'Please add a title.'));
error = true;
}
if ($scope.calendar === null || typeof $scope.calendar === 'undefined') {
- OC.Notification.showTemporary(t('calendar', 'Please select a calendar!'));
+ OC.Notification.showTemporary(t('calendar', 'Please select a calendar.'));
error = true;
}
if (!$scope.properties.checkDtStartBeforeDtEnd()) {
- OC.Notification.showTemporary(t('calendar', 'The event ends before it starts!'));
+ OC.Notification.showTemporary(t('calendar', 'The event can not end before it starts.'));
error = true;
}
| 1 | /**
* Calendar App
*
* @author Raghu Nayyar
* @author Georg Ehrke
* @copyright 2016 Raghu Nayyar <[email protected]>
* @copyright 2016 Georg Ehrke <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
* License as published by the Free Software Foundation; either
* version 3 of the License, or any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU AFFERO GENERAL PUBLIC LICENSE for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
*/
/**
* Controller: Events Dialog Controller
* Description: Takes care of anything inside the Events Modal.
*/
app.controller('EditorController', ['$scope', 'TimezoneService', 'AutoCompletionService', '$timeout', '$window', '$uibModalInstance', 'vevent', 'simpleEvent', 'calendar', 'isNew', 'emailAddress',
function($scope, TimezoneService, AutoCompletionService, $timeout, $window, $uibModalInstance, vevent, simpleEvent, calendar, isNew, emailAddress) {
'use strict';
$scope.properties = simpleEvent;
$scope.is_new = isNew;
$scope.calendar = calendar;
$scope.oldCalendar = isNew ? calendar : vevent.calendar;
$scope.readOnly = !vevent.calendar.isWritable();
$scope.accessibleViaCalDAV = vevent.calendar.eventsAccessibleViaCalDAV();
$scope.selected = 0;
$scope.timezones = [];
$scope.emailAddress = emailAddress;
$scope.edittimezone = ((
$scope.properties.dtstart.parameters.zone !== 'floating' &&
$scope.properties.dtstart.parameters.zone !== $scope.defaulttimezone) || (
$scope.properties.dtend.parameters.zone !== 'floating' &&
$scope.properties.dtend.parameters.zone !== $scope.defaulttimezone
));
$scope.preEditingHooks = [];
$scope.postEditingHooks = [];
$scope.tabs = [
{title: t('calendar', 'Details'), value: 0},
{title: t('calendar', 'Attendees'), value: 1},
{title: t('calendar', 'Reminders'), value: 2},
{title: t('calendar', 'Repeating'), value: 3}
];
$scope.classSelect = [
{displayname: t('calendar', 'When shared show full event'), type: 'PUBLIC'},
{displayname: t('calendar', 'When shared show only busy'), type: 'CONFIDENTIAL'},
{displayname: t('calendar', 'When shared hide this event'), type: 'PRIVATE'}
];
$scope.statusSelect = [
{displayname: t('calendar', 'Confirmed'), type: 'CONFIRMED'},
{displayname: t('calendar', 'Tentative'), type: 'TENTATIVE'},
{displayname: t('calendar', 'Cancelled'), type: 'CANCELLED'}
];
$scope.registerPreHook = function(callback) {
$scope.preEditingHooks.push(callback);
};
$uibModalInstance.rendered.then(function() {
if ($scope.properties.allDay) {
$scope.properties.dtend.value = moment($scope.properties.dtend.value.subtract(1, 'days'));
}
autosize($('.advanced--textarea'));
autosize($('.events--textarea'));
$timeout(() => {
autosize.update($('.advanced--textarea'));
autosize.update($('.events--textarea'));
}, 50);
angular.forEach($scope.preEditingHooks, function(callback) {
callback();
});
$scope.tabopener(0);
});
$scope.registerPostHook = function(callback) {
$scope.postEditingHooks.push(callback);
};
$scope.proceed = function() {
$scope.prepareClose();
$uibModalInstance.close({
action: 'proceed',
calendar: $scope.calendar,
simple: $scope.properties,
vevent: vevent
});
};
$scope.save = function() {
if (!$scope.validate()) {
return;
}
$scope.prepareClose();
$scope.properties.patch();
$uibModalInstance.close({
action: 'save',
calendar: $scope.calendar,
simple: $scope.properties,
vevent: vevent
});
};
$scope.validate = function() {
var error = false;
if ($scope.properties.summary === null || $scope.properties.summary.value.trim() === '') {
OC.Notification.showTemporary(t('calendar', 'Please add a title!'));
error = true;
}
if ($scope.calendar === null || typeof $scope.calendar === 'undefined') {
OC.Notification.showTemporary(t('calendar', 'Please select a calendar!'));
error = true;
}
if (!$scope.properties.checkDtStartBeforeDtEnd()) {
OC.Notification.showTemporary(t('calendar', 'The event ends before it starts!'));
error = true;
}
return !error;
};
$scope.prepareClose = function() {
if ($scope.properties.allDay) {
$scope.properties.dtend.value.add(1, 'days');
}
angular.forEach($scope.postEditingHooks, function(callback) {
callback();
});
};
$scope.cancel = function() {
$uibModalInstance.dismiss('cancel');
};
$scope.delete = function() {
$uibModalInstance.dismiss('delete');
};
$scope.export = function() {
$window.open($scope.oldCalendar.url + vevent.uri);
};
/**
* Everything tabs
*/
$scope.tabopener = function (val) {
$scope.selected = val;
if (val === 0) {
$scope.eventsdetailsview = true;
$scope.eventsattendeeview = false;
$scope.eventsalarmview = false;
$scope.eventsrepeatview = false;
} else if (val === 1) {
$scope.eventsdetailsview = false;
$scope.eventsattendeeview = true;
$scope.eventsalarmview = false;
$scope.eventsrepeatview = false;
} else if (val === 2) {
$scope.eventsdetailsview = false;
$scope.eventsattendeeview = false;
$scope.eventsalarmview = true;
$scope.eventsrepeatview = false;
} else if (val === 3) {
$scope.eventsdetailsview = false;
$scope.eventsattendeeview = false;
$scope.eventsalarmview = false;
$scope.eventsrepeatview = true;
}
};
/**
* Everything calendar select
*/
$scope.selectedCalendarChanged = () => {
if ($scope.calendar.enabled === false) {
$scope.calendar.enabled = true;
$scope.calendar.update();
}
};
$scope.showCalendarSelection = function() {
const writableCalendars = $scope.calendars.filter(function (c) {
return c.isWritable();
});
return writableCalendars.length > 1;
};
/**
* Everything date and time
*/
$scope.$watch('properties.dtstart.value', function(nv, ov) {
var diff = nv.diff(ov, 'seconds');
if (diff !== 0) {
$scope.properties.dtend.value = moment($scope.properties.dtend.value.add(diff, 'seconds'));
}
});
$scope.toggledAllDay = function() {
if ($scope.properties.allDay) {
return;
}
if ($scope.properties.dtstart.value.isSame($scope.properties.dtend.value)) {
$scope.properties.dtend.value = moment($scope.properties.dtend.value.add(1, 'hours'));
}
if ($scope.properties.dtstart.parameters.zone === 'floating' &&
$scope.properties.dtend.parameters.zone === 'floating') {
$scope.properties.dtstart.parameters.zone = $scope.defaulttimezone;
$scope.properties.dtend.parameters.zone = $scope.defaulttimezone;
}
};
$scope.$watch('properties.allDay', $scope.toggledAllDay);
/**
* Everything timezones
*/
TimezoneService.listAll().then(function(list) {
if ($scope.properties.dtstart.parameters.zone !== 'floating' &&
list.indexOf($scope.properties.dtstart.parameters.zone) === -1) {
list.push($scope.properties.dtstart.parameters.zone);
}
if ($scope.properties.dtend.parameters.zone !== 'floating' &&
list.indexOf($scope.properties.dtend.parameters.zone) === -1) {
list.push($scope.properties.dtend.parameters.zone);
}
angular.forEach(list, function(timezone) {
if(timezone === 'GMT' || timezone === 'Z') {
return;
}
if (timezone.split('/').length === 1) {
$scope.timezones.push({
displayname: timezone,
group: t('calendar', 'Global'),
value: timezone
});
} else {
$scope.timezones.push({
displayname: timezone.split('/').slice(1).join('/'),
group: timezone.split('/', 1),
value: timezone
});
}
});
$scope.timezones.push({
displayname: t('calendar', 'None'),
group: t('calendar', 'Global'),
value: 'floating'
});
});
$scope.loadTimezone = function(tzId) {
TimezoneService.get(tzId).then(function(timezone) {
ICAL.TimezoneService.register(tzId, timezone.jCal);
});
};
/**
* Everything location
*/
$scope.searchLocation = function(value) {
return AutoCompletionService.searchLocation(value);
};
$scope.selectLocationFromTypeahead = function(item) {
$scope.properties.location.value = item.label;
};
/**
* Everything access class
*/
$scope.setClassToDefault = function() {
if ($scope.properties.class === null) {
$scope.properties.class = {
type: 'string',
value: 'PUBLIC'
};
}
};
$scope.setStatusToDefault = function() {
if ($scope.properties.status === null) {
$scope.properties.status = {
type: 'string',
value: 'CONFIRMED'
};
}
};
}
]);
| 1 | 5,959 | What about `may not end`? @jancborchardt | nextcloud-calendar | js |
@@ -1236,7 +1236,7 @@ func (a *WebAPI) validateApprover(stages []*model.PipelineStage, commander, stag
return nil
}
}
- return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not approve the stage because %q isn't set as an approver.", commander))
+ return status.Error(codes.FailedPrecondition, fmt.Sprintf("You can't approve this deployment because you (%s) are not in the approver list: %v", commander, approvers))
}
func (a *WebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) { | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcapi
import (
"context"
"errors"
"fmt"
"strings"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore"
"github.com/pipe-cd/pipe/pkg/app/api/commandstore"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/app/api/stagelogstore"
"github.com/pipe-cd/pipe/pkg/cache"
"github.com/pipe-cd/pipe/pkg/cache/memorycache"
"github.com/pipe-cd/pipe/pkg/cache/rediscache"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/datastore"
"github.com/pipe-cd/pipe/pkg/filestore"
"github.com/pipe-cd/pipe/pkg/git"
"github.com/pipe-cd/pipe/pkg/insight/insightstore"
"github.com/pipe-cd/pipe/pkg/model"
"github.com/pipe-cd/pipe/pkg/redis"
"github.com/pipe-cd/pipe/pkg/rpc/rpcauth"
)
type encrypter interface {
Encrypt(text string) (string, error)
}
// WebAPI implements the behaviors for the gRPC definitions of WebAPI.
type WebAPI struct {
applicationStore datastore.ApplicationStore
environmentStore datastore.EnvironmentStore
deploymentStore datastore.DeploymentStore
pipedStore datastore.PipedStore
projectStore datastore.ProjectStore
apiKeyStore datastore.APIKeyStore
stageLogStore stagelogstore.Store
applicationLiveStateStore applicationlivestatestore.Store
commandStore commandstore.Store
insightStore insightstore.Store
encrypter encrypter
appProjectCache cache.Cache
deploymentProjectCache cache.Cache
pipedProjectCache cache.Cache
envProjectCache cache.Cache
insightCache cache.Cache
projectsInConfig map[string]config.ControlPlaneProject
logger *zap.Logger
}
// NewWebAPI creates a new WebAPI instance.
func NewWebAPI(
ctx context.Context,
ds datastore.DataStore,
fs filestore.Store,
sls stagelogstore.Store,
alss applicationlivestatestore.Store,
cmds commandstore.Store,
is insightstore.Store,
rd redis.Redis,
projs map[string]config.ControlPlaneProject,
encrypter encrypter,
logger *zap.Logger) *WebAPI {
a := &WebAPI{
applicationStore: datastore.NewApplicationStore(ds),
environmentStore: datastore.NewEnvironmentStore(ds),
deploymentStore: datastore.NewDeploymentStore(ds),
pipedStore: datastore.NewPipedStore(ds),
projectStore: datastore.NewProjectStore(ds),
apiKeyStore: datastore.NewAPIKeyStore(ds),
stageLogStore: sls,
applicationLiveStateStore: alss,
commandStore: cmds,
insightStore: is,
projectsInConfig: projs,
encrypter: encrypter,
appProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
deploymentProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
pipedProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
envProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
insightCache: rediscache.NewTTLCache(rd, 3*time.Hour),
logger: logger.Named("web-api"),
}
return a
}
// Register registers all handling of this service into the specified gRPC server.
func (a *WebAPI) Register(server *grpc.Server) {
webservice.RegisterWebServiceServer(server, a)
}
func (a *WebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
env := model.Environment{
Id: uuid.New().String(),
Name: req.Name,
Desc: req.Desc,
ProjectId: claims.Role.ProjectId,
}
err = a.environmentStore.AddEnvironment(ctx, &env)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The environment already exists")
}
if err != nil {
a.logger.Error("failed to create environment", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create environment")
}
return &webservice.AddEnvironmentResponse{}, nil
}
func (a *WebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *WebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
},
}
envs, err := a.environmentStore.ListEnvironments(ctx, opts)
if err != nil {
a.logger.Error("failed to get environments", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get environments")
}
return &webservice.ListEnvironmentsResponse{
Environments: envs,
}, nil
}
func (a *WebAPI) EnableEnvironment(ctx context.Context, req *webservice.EnableEnvironmentRequest) (*webservice.EnableEnvironmentResponse, error) {
if err := a.updateEnvironmentEnable(ctx, req.EnvironmentId, true); err != nil {
return nil, err
}
return &webservice.EnableEnvironmentResponse{}, nil
}
func (a *WebAPI) DisableEnvironment(ctx context.Context, req *webservice.DisableEnvironmentRequest) (*webservice.DisableEnvironmentResponse, error) {
if err := a.updateEnvironmentEnable(ctx, req.EnvironmentId, false); err != nil {
return nil, err
}
return &webservice.DisableEnvironmentResponse{}, nil
}
// DeleteEnvironment deletes the given environment and all applications that belong to it.
// It returns a FailedPrecondition error if any Piped is still using that environment.
func (a *WebAPI) DeleteEnvironment(ctx context.Context, req *webservice.DeleteEnvironmentRequest) (*webservice.DeleteEnvironmentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateEnvBelongsToProject(ctx, req.EnvironmentId, claims.Role.ProjectId); err != nil {
return nil, err
}
// Check if no Piped has permission to the given environment.
pipeds, err := a.pipedStore.ListPipeds(ctx, datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
{
Field: "EnvIds",
Operator: datastore.OperatorContains,
Value: req.EnvironmentId,
},
{
Field: "Disabled",
Operator: datastore.OperatorEqual,
Value: false,
},
},
})
if err != nil {
a.logger.Error("failed to fetch Pipeds linked to the given environment",
zap.String("env-id", req.EnvironmentId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to validate the deletion operation")
}
if len(pipeds) > 0 {
pipedNames := make([]string, 0, len(pipeds))
for _, p := range pipeds {
pipedNames = append(pipedNames, p.Name)
}
return nil, status.Errorf(
codes.FailedPrecondition,
"Found Pipeds linked the environment to be deleted. Please remove this environment from all Pipeds (%s) on the Piped settings page",
strings.Join(pipedNames, ","),
)
}
// Delete all applications that belongs to the given env.
apps, _, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
{
Field: "EnvId",
Operator: datastore.OperatorEqual,
Value: req.EnvironmentId,
},
},
})
if err != nil {
a.logger.Error("failed to fetch applications that belongs to the given environment",
zap.String("env-id", req.EnvironmentId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to fetch applications that belongs to the given environment")
}
for _, app := range apps {
if app.ProjectId != claims.Role.ProjectId {
continue
}
err := a.applicationStore.DeleteApplication(ctx, app.Id)
if err == nil {
continue
}
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.Internal, "The application is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "Invalid value to delete")
default:
a.logger.Error("failed to delete the application",
zap.String("application-id", app.Id),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to delete the application")
}
}
if err := a.environmentStore.DeleteEnvironment(ctx, req.EnvironmentId); err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.NotFound, "The environment is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "Invalid value to delete")
default:
a.logger.Error("failed to delete the environment",
zap.String("env-id", req.EnvironmentId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to delete the environment")
}
}
return &webservice.DeleteEnvironmentResponse{}, nil
}
func (a *WebAPI) updateEnvironmentEnable(ctx context.Context, envID string, enable bool) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validateEnvBelongsToProject(ctx, envID, claims.Role.ProjectId); err != nil {
return err
}
var updater func(context.Context, string) error
if enable {
updater = a.environmentStore.EnableEnvironment
} else {
updater = a.environmentStore.DisableEnvironment
}
if err := updater(ctx, envID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.NotFound, "The environment is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the environment",
zap.String("env-id", envID),
zap.Error(err),
)
return status.Error(codes.Internal, "Failed to update the environment")
}
}
return nil
}
// validateEnvBelongsToProject checks if the given environment belongs to the given project.
// It returns an error unless the environment belongs to the project.
func (a *WebAPI) validateEnvBelongsToProject(ctx context.Context, envID, projectID string) error {
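// Check the in-memory cache first so that repeated requests do not hit the datastore.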
eid, err := a.envProjectCache.Get(envID)
if err == nil {
if projectID != eid {
return status.Error(codes.PermissionDenied, "Requested environment doesn't belong to the project you logged in")
}
return nil
}
env, err := getEnvironment(ctx, a.environmentStore, envID, a.logger)
if err != nil {
return err
}
a.envProjectCache.Put(envID, env.ProjectId)
if projectID != env.ProjectId {
return status.Error(codes.PermissionDenied, "Requested environment doesn't belong to the project you logged in")
}
return nil
}
func (a *WebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
key, keyHash, err := model.GeneratePipedKey()
if err != nil {
a.logger.Error("failed to generate piped key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate the piped key")
}
piped := model.Piped{
Id: uuid.New().String(),
Name: req.Name,
Desc: req.Desc,
ProjectId: claims.Role.ProjectId,
EnvIds: req.EnvIds,
Status: model.Piped_OFFLINE,
}
if err := piped.AddKey(keyHash, claims.Subject, time.Now()); err != nil {
return nil, status.Error(codes.FailedPrecondition, fmt.Sprintf("Failed to create key: %v", err))
}
err = a.pipedStore.AddPiped(ctx, &piped)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The piped already exists")
}
if err != nil {
a.logger.Error("failed to register piped", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to register piped")
}
return &webservice.RegisterPipedResponse{
Id: piped.Id,
Key: key,
}, nil
}
func (a *WebAPI) UpdatePiped(ctx context.Context, req *webservice.UpdatePipedRequest) (*webservice.UpdatePipedResponse, error) {
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.UpdatePiped(ctx, req.PipedId, func(p *model.Piped) error {
p.Name = req.Name
p.Desc = req.Desc
p.EnvIds = req.EnvIds
return nil
})
}
if err := a.updatePiped(ctx, req.PipedId, updater); err != nil {
return nil, err
}
return &webservice.UpdatePipedResponse{}, nil
}
func (a *WebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
key, keyHash, err := model.GeneratePipedKey()
if err != nil {
a.logger.Error("failed to generate piped key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate the piped key")
}
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.AddKey(ctx, pipedID, keyHash, claims.Subject, time.Now())
}
if err := a.updatePiped(ctx, req.Id, updater); err != nil {
return nil, err
}
return &webservice.RecreatePipedKeyResponse{
Key: key,
}, nil
}
func (a *WebAPI) DeleteOldPipedKeys(ctx context.Context, req *webservice.DeleteOldPipedKeysRequest) (*webservice.DeleteOldPipedKeysResponse, error) {
if _, err := rpcauth.ExtractClaims(ctx); err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.DeleteOldKeys(ctx, pipedID)
}
if err := a.updatePiped(ctx, req.PipedId, updater); err != nil {
return nil, err
}
return &webservice.DeleteOldPipedKeysResponse{}, nil
}
func (a *WebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) {
if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.EnablePiped); err != nil {
return nil, err
}
return &webservice.EnablePipedResponse{}, nil
}
func (a *WebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) {
if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.DisablePiped); err != nil {
return nil, err
}
return &webservice.DisablePipedResponse{}, nil
}
func (a *WebAPI) updatePiped(ctx context.Context, pipedID string, updater func(context.Context, string) error) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validatePipedBelongsToProject(ctx, pipedID, claims.Role.ProjectId); err != nil {
return err
}
if err := updater(ctx, pipedID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.InvalidArgument, "The piped is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the piped",
zap.String("piped-id", pipedID),
zap.Error(err),
)
// TODO: Improve error handling. Instead of treating everything as an Internal error like this,
// we should check the error type to decide whether to pass its message to the web client or return just a generic message.
return status.Error(codes.Internal, "Failed to update the piped")
}
}
return nil
}
// TODO: Consider using piped-stats to decide piped connection status.
func (a *WebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
},
}
if req.Options != nil {
if req.Options.Enabled != nil {
opts.Filters = append(opts.Filters, datastore.ListFilter{
Field: "Disabled",
Operator: datastore.OperatorEqual,
Value: !req.Options.Enabled.GetValue(),
})
}
}
pipeds, err := a.pipedStore.ListPipeds(ctx, opts)
if err != nil {
a.logger.Error("failed to get pipeds", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get pipeds")
}
// Redact all sensitive data inside piped message before sending to the client.
for i := range pipeds {
pipeds[i].RedactSensitiveData()
}
return &webservice.ListPipedsResponse{
Pipeds: pipeds,
}, nil
}
func (a *WebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil {
return nil, err
}
// Redact all sensitive data inside piped message before sending to the client.
piped.RedactSensitiveData()
return &webservice.GetPipedResponse{
Piped: piped,
}, nil
}
func (a *WebAPI) UpdatePipedDesiredVersion(ctx context.Context, req *webservice.UpdatePipedDesiredVersionRequest) (*webservice.UpdatePipedDesiredVersionResponse, error) {
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.UpdatePiped(ctx, pipedID, func(p *model.Piped) error {
p.DesiredVersion = req.Version
return nil
})
}
for _, pipedID := range req.PipedIds {
if err := a.updatePiped(ctx, pipedID, updater); err != nil {
return nil, err
}
}
return &webservice.UpdatePipedDesiredVersionResponse{}, nil
}
// validatePipedBelongsToProject checks if the given piped belongs to the given project.
// It returns an error unless the piped belongs to the project.
func (a *WebAPI) validatePipedBelongsToProject(ctx context.Context, pipedID, projectID string) error {
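// Resolve the piped's project from the in-memory cache first; on a cache miss, load the piped from the datastore and remember its project for later calls.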
pid, err := a.pipedProjectCache.Get(pipedID)
if err == nil {
if pid != projectID {
return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in")
}
return nil
}
piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger)
if err != nil {
return err
}
a.pipedProjectCache.Put(pipedID, piped.ProjectId)
if piped.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in")
}
return nil
}
// TODO: Validate the specified piped to ensure that it belongs to the specified environment.
func (a *WebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if piped.ProjectId != claims.Role.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested piped does not belong to your project")
}
gitpath, err := makeGitPath(
req.GitPath.Repo.Id,
req.GitPath.Path,
req.GitPath.ConfigFilename,
piped,
a.logger,
)
if err != nil {
return nil, err
}
app := model.Application{
Id: uuid.New().String(),
Name: req.Name,
EnvId: req.EnvId,
PipedId: req.PipedId,
ProjectId: claims.Role.ProjectId,
GitPath: gitpath,
Kind: req.Kind,
CloudProvider: req.CloudProvider,
Description: req.Description,
}
err = a.applicationStore.AddApplication(ctx, &app)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The application already exists")
}
if err != nil {
a.logger.Error("failed to create application", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create application")
}
return &webservice.AddApplicationResponse{
ApplicationId: app.Id,
}, nil
}
func (a *WebAPI) UpdateApplication(ctx context.Context, req *webservice.UpdateApplicationRequest) (*webservice.UpdateApplicationResponse, error) {
updater := func(app *model.Application) error {
app.Name = req.Name
app.EnvId = req.EnvId
app.PipedId = req.PipedId
app.Kind = req.Kind
app.CloudProvider = req.CloudProvider
return nil
}
if err := a.updateApplication(ctx, req.ApplicationId, req.PipedId, updater); err != nil {
return nil, err
}
return &webservice.UpdateApplicationResponse{}, nil
}
func (a *WebAPI) UpdateApplicationDescription(ctx context.Context, req *webservice.UpdateApplicationDescriptionRequest) (*webservice.UpdateApplicationDescriptionResponse, error) {
updater := func(app *model.Application) error {
app.Description = req.Description
return nil
}
if err := a.updateApplication(ctx, req.ApplicationId, "", updater); err != nil {
return nil, err
}
return &webservice.UpdateApplicationDescriptionResponse{}, nil
}
func (a *WebAPI) updateApplication(ctx context.Context, id, pipedID string, updater func(app *model.Application) error) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
// Ensure that the specified piped is assignable for this application.
if pipedID != "" {
piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger)
if err != nil {
return err
}
if piped.ProjectId != claims.Role.ProjectId {
return status.Error(codes.InvalidArgument, "Requested piped does not belong to your project")
}
}
err = a.applicationStore.UpdateApplication(ctx, id, updater)
if err != nil {
a.logger.Error("failed to update application", zap.Error(err))
return status.Error(codes.Internal, "Failed to update application")
}
return nil
}
func (a *WebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) {
if err := a.updateApplicationEnable(ctx, req.ApplicationId, true); err != nil {
return nil, err
}
return &webservice.EnableApplicationResponse{}, nil
}
func (a *WebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) {
if err := a.updateApplicationEnable(ctx, req.ApplicationId, false); err != nil {
return nil, err
}
return &webservice.DisableApplicationResponse{}, nil
}
func (a *WebAPI) DeleteApplication(ctx context.Context, req *webservice.DeleteApplicationRequest) (*webservice.DeleteApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
if err := a.applicationStore.DeleteApplication(ctx, req.ApplicationId); err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.NotFound, "The application is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "Invalid value to delete")
default:
a.logger.Error("failed to delete the application",
zap.String("application-id", req.ApplicationId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to delete the application")
}
}
return &webservice.DeleteApplicationResponse{}, nil
}
func (a *WebAPI) updateApplicationEnable(ctx context.Context, appID string, enable bool) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validateAppBelongsToProject(ctx, appID, claims.Role.ProjectId); err != nil {
return err
}
var updater func(context.Context, string) error
if enable {
updater = a.applicationStore.EnableApplication
} else {
updater = a.applicationStore.DisableApplication
}
if err := updater(ctx, appID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.NotFound, "The application is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the application",
zap.String("application-id", appID),
zap.Error(err),
)
return status.Error(codes.Internal, "Failed to update the application")
}
}
return nil
}
func (a *WebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
orders := []datastore.Order{
{
Field: "UpdatedAt",
Direction: datastore.Desc,
},
{
Field: "Id",
Direction: datastore.Asc,
},
}
filters := []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
}
if o := req.Options; o != nil {
if o.Enabled != nil {
filters = append(filters, datastore.ListFilter{
Field: "Disabled",
Operator: datastore.OperatorEqual,
Value: !o.Enabled.GetValue(),
})
}
// Allowing multiple values so that an IN query can be used later.
// Currently only the first value is used.
if len(o.Kinds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Kind",
Operator: datastore.OperatorEqual,
Value: o.Kinds[0],
})
}
if len(o.SyncStatuses) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "SyncState.Status",
Operator: datastore.OperatorEqual,
Value: o.SyncStatuses[0],
})
}
if len(o.EnvIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "EnvId",
Operator: datastore.OperatorEqual,
Value: o.EnvIds[0],
})
}
if o.Name != "" {
filters = append(filters, datastore.ListFilter{
Field: "Name",
Operator: datastore.OperatorEqual,
Value: o.Name,
})
}
}
apps, _, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{
Filters: filters,
Orders: orders,
})
if err != nil {
a.logger.Error("failed to get applications", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get applications")
}
return &webservice.ListApplicationsResponse{
Applications: apps,
}, nil
}
func (a *WebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
if err != nil {
return nil, err
}
if claims.Role.ProjectId != app.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested application does not belong to your project")
}
cmd := model.Command{
Id: uuid.New().String(),
PipedId: app.PipedId,
ApplicationId: app.Id,
ProjectId: app.ProjectId,
Type: model.Command_SYNC_APPLICATION,
Commander: claims.Subject,
SyncApplication: &model.Command_SyncApplication{
ApplicationId: app.Id,
SyncStrategy: req.SyncStrategy,
},
}
if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
return nil, err
}
return &webservice.SyncApplicationResponse{
CommandId: cmd.Id,
}, nil
}
func (a *WebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
if err != nil {
return nil, err
}
if app.ProjectId != claims.Role.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested application does not belong to your project")
}
return &webservice.GetApplicationResponse{
Application: app,
}, nil
}
func (a *WebAPI) GenerateApplicationSealedSecret(ctx context.Context, req *webservice.GenerateApplicationSealedSecretRequest) (*webservice.GenerateApplicationSealedSecretResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil {
return nil, err
}
se := model.GetSecretEncryptionInPiped(piped)
pubkey, err := getEncriptionKey(se)
if err != nil {
return nil, err
}
ciphertext, err := encrypt(req.Data, pubkey, req.Base64Encoding, a.logger)
if err != nil {
return nil, err
}
return &webservice.GenerateApplicationSealedSecretResponse{
Data: ciphertext,
}, nil
}
// validateAppBelongsToProject checks if the given application belongs to the given project.
// It returns an error unless the application belongs to the project.
func (a *WebAPI) validateAppBelongsToProject(ctx context.Context, appID, projectID string) error {
pid, err := a.appProjectCache.Get(appID)
if err == nil {
if pid != projectID {
return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in")
}
return nil
}
app, err := getApplication(ctx, a.applicationStore, appID, a.logger)
if err != nil {
return err
}
a.appProjectCache.Put(appID, app.ProjectId)
if app.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in")
}
return nil
}
func (a *WebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
orders := []datastore.Order{
{
Field: "UpdatedAt",
Direction: datastore.Desc,
},
{
Field: "Id",
Direction: datastore.Asc,
},
}
filters := []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
}
if o := req.Options; o != nil {
// Allowing multiple values so that an IN query can be used later.
// Currently only the first value is used.
if len(o.Statuses) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Status",
Operator: datastore.OperatorEqual,
Value: o.Statuses[0],
})
}
if len(o.Kinds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Kind",
Operator: datastore.OperatorEqual,
Value: o.Kinds[0],
})
}
if len(o.ApplicationIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "ApplicationId",
Operator: datastore.OperatorEqual,
Value: o.ApplicationIds[0],
})
}
if len(o.EnvIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "EnvId",
Operator: datastore.OperatorEqual,
Value: o.EnvIds[0],
})
}
if o.ApplicationName != "" {
filters = append(filters, datastore.ListFilter{
Field: "ApplicationName",
Operator: datastore.OperatorEqual,
Value: o.ApplicationName,
})
}
}
deployments, cursor, err := a.deploymentStore.ListDeployments(ctx, datastore.ListOptions{
Filters: filters,
Orders: orders,
Limit: int(req.PageSize),
Cursor: req.Cursor,
})
if err != nil {
a.logger.Error("failed to get deployments", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get deployments")
}
return &webservice.ListDeploymentsResponse{
Deployments: deployments,
Cursor: cursor,
}, nil
}
func (a *WebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
if err != nil {
return nil, err
}
if claims.Role.ProjectId != deployment.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested deployment does not belong to your project")
}
return &webservice.GetDeploymentResponse{
Deployment: deployment,
}, nil
}
// validateDeploymentBelongsToProject checks if the given deployment belongs to the given project.
// It returns an error unless the deployment belongs to the project.
func (a *WebAPI) validateDeploymentBelongsToProject(ctx context.Context, deploymentID, projectID string) error {
pid, err := a.deploymentProjectCache.Get(deploymentID)
if err == nil {
if pid != projectID {
return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in")
}
return nil
}
deployment, err := getDeployment(ctx, a.deploymentStore, deploymentID, a.logger)
if err != nil {
return err
}
a.deploymentProjectCache.Put(deploymentID, deployment.ProjectId)
if deployment.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in")
}
return nil
}
func (a *WebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
return nil, err
}
blocks, completed, err := a.stageLogStore.FetchLogs(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.OffsetIndex)
if errors.Is(err, stagelogstore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The stage log not found")
}
if err != nil {
a.logger.Error("failed to get stage logs", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get stage logs")
}
return &webservice.GetStageLogResponse{
Blocks: blocks,
Completed: completed,
}, nil
}
func (a *WebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
if err != nil {
return nil, err
}
if claims.Role.ProjectId != deployment.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested deployment does not belong to your project")
}
if model.IsCompletedDeployment(deployment.Status) {
return nil, status.Errorf(codes.FailedPrecondition, "could not cancel the deployment because it was already completed")
}
cmd := model.Command{
Id: uuid.New().String(),
PipedId: deployment.PipedId,
ApplicationId: deployment.ApplicationId,
ProjectId: deployment.ProjectId,
DeploymentId: req.DeploymentId,
Type: model.Command_CANCEL_DEPLOYMENT,
Commander: claims.Subject,
CancelDeployment: &model.Command_CancelDeployment{
DeploymentId: req.DeploymentId,
ForceRollback: req.ForceRollback,
ForceNoRollback: req.ForceNoRollback,
},
}
if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
return nil, err
}
return &webservice.CancelDeploymentResponse{
CommandId: cmd.Id,
}, nil
}
func (a *WebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
if err != nil {
return nil, err
}
if err := a.validateApprover(deployment.Stages, claims.Subject, req.StageId); err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
return nil, err
}
stage, ok := deployment.StageStatusMap()[req.StageId]
if !ok {
return nil, status.Error(codes.FailedPrecondition, "The stage was not found in the deployment")
}
if model.IsCompletedStage(stage) {
return nil, status.Errorf(codes.FailedPrecondition, "Could not approve the stage because it was already completed")
}
commandID := uuid.New().String()
cmd := model.Command{
Id: commandID,
PipedId: deployment.PipedId,
ApplicationId: deployment.ApplicationId,
ProjectId: deployment.ProjectId,
DeploymentId: req.DeploymentId,
StageId: req.StageId,
Type: model.Command_APPROVE_STAGE,
Commander: claims.Subject,
ApproveStage: &model.Command_ApproveStage{
DeploymentId: req.DeploymentId,
StageId: req.StageId,
},
}
if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
return nil, err
}
return &webservice.ApproveStageResponse{
CommandId: commandID,
}, nil
}
// No error means that the given commander is valid.
func (a *WebAPI) validateApprover(stages []*model.PipelineStage, commander, stageID string) error {
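// Approvers are read from the stage metadata under the "Approvers" key as a comma-separated list of account names; an empty list means anyone may approve.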
var approvers []string
for _, s := range stages {
if s.Id == stageID {
approvers = strings.Split(s.Metadata["Approvers"], ",")
}
}
if len(approvers) == 0 {
// Anyone can approve the deployment pipeline
return nil
}
for _, ap := range approvers {
if ap == commander {
return nil
}
}
return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not approve the stage because %q isn't set as an approver.", commander))
}
func (a *WebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
snapshot, err := a.applicationLiveStateStore.GetStateSnapshot(ctx, req.ApplicationId)
if errors.Is(err, filestore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "Application live state not found")
}
if err != nil {
a.logger.Error("failed to get application live state", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get application live state")
}
return &webservice.GetApplicationLiveStateResponse{
Snapshot: snapshot,
}, nil
}
// GetProject gets the specified project without sensitive data.
func (a *WebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
project, err := a.getProject(ctx, claims.Role.ProjectId)
if err != nil {
return nil, err
}
// Redact all sensitive data inside project message before sending to the client.
project.RedactSensitiveData()
return &webservice.GetProjectResponse{
Project: project,
}, nil
}
func (a *WebAPI) getProject(ctx context.Context, projectID string) (*model.Project, error) {
if p, ok := a.projectsInConfig[projectID]; ok {
return &model.Project{
Id: p.Id,
Desc: p.Desc,
StaticAdmin: &model.ProjectStaticUser{
Username: p.StaticAdmin.Username,
PasswordHash: p.StaticAdmin.PasswordHash,
},
}, nil
}
project, err := a.projectStore.GetProject(ctx, projectID)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The project is not found")
}
if err != nil {
a.logger.Error("failed to get project", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get project")
}
return project, nil
}
// UpdateProjectStaticAdmin updates the static admin user settings.
func (a *WebAPI) UpdateProjectStaticAdmin(ctx context.Context, req *webservice.UpdateProjectStaticAdminRequest) (*webservice.UpdateProjectStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.UpdateProjectStaticAdmin(ctx, claims.Role.ProjectId, req.Username, req.Password); err != nil {
a.logger.Error("failed to update static admin", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update static admin")
}
return &webservice.UpdateProjectStaticAdminResponse{}, nil
}
// EnableStaticAdmin enables static admin login.
func (a *WebAPI) EnableStaticAdmin(ctx context.Context, req *webservice.EnableStaticAdminRequest) (*webservice.EnableStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.EnableStaticAdmin(ctx, claims.Role.ProjectId); err != nil {
a.logger.Error("failed to enable static admin login", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to enable static admin login")
}
return &webservice.EnableStaticAdminResponse{}, nil
}
// DisableStaticAdmin disables static admin login.
func (a *WebAPI) DisableStaticAdmin(ctx context.Context, req *webservice.DisableStaticAdminRequest) (*webservice.DisableStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.DisableStaticAdmin(ctx, claims.Role.ProjectId); err != nil {
a.logger.Error("failed to disenable static admin login", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to disenable static admin login")
}
return &webservice.DisableStaticAdminResponse{}, nil
}
// UpdateProjectSSOConfig updates the sso settings.
func (a *WebAPI) UpdateProjectSSOConfig(ctx context.Context, req *webservice.UpdateProjectSSOConfigRequest) (*webservice.UpdateProjectSSOConfigResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := req.Sso.Encrypt(a.encrypter); err != nil {
a.logger.Error("failed to encrypt sensitive data in sso configurations", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to encrypt sensitive data in sso configurations")
}
if err := a.projectStore.UpdateProjectSSOConfig(ctx, claims.Role.ProjectId, req.Sso); err != nil {
a.logger.Error("failed to update project single sign on settings", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update project single sign on settings")
}
return &webservice.UpdateProjectSSOConfigResponse{}, nil
}
// UpdateProjectRBACConfig updates the RBAC settings.
func (a *WebAPI) UpdateProjectRBACConfig(ctx context.Context, req *webservice.UpdateProjectRBACConfigRequest) (*webservice.UpdateProjectRBACConfigResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.UpdateProjectRBACConfig(ctx, claims.Role.ProjectId, req.Rbac); err != nil {
a.logger.Error("failed to update project single sign on settings", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update project single sign on settings")
}
return &webservice.UpdateProjectRBACConfigResponse{}, nil
}
// GetMe gets information about the current user.
func (a *WebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
return &webservice.GetMeResponse{
Subject: claims.Subject,
AvatarUrl: claims.AvatarURL,
ProjectId: claims.Role.ProjectId,
ProjectRole: claims.Role.ProjectRole,
}, nil
}
func (a *WebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
cmd, err := getCommand(ctx, a.commandStore, req.CommandId, a.logger)
if err != nil {
return nil, err
}
if claims.Role.ProjectId != cmd.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested command does not belong to your project")
}
return &webservice.GetCommandResponse{
Command: cmd,
}, nil
}
func (a *WebAPI) ListDeploymentConfigTemplates(ctx context.Context, req *webservice.ListDeploymentConfigTemplatesRequest) (*webservice.ListDeploymentConfigTemplatesResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
var templates []*webservice.DeploymentConfigTemplate
switch app.Kind {
case model.ApplicationKind_KUBERNETES:
templates = k8sDeploymentConfigTemplates
case model.ApplicationKind_TERRAFORM:
templates = terraformDeploymentConfigTemplates
case model.ApplicationKind_LAMBDA:
templates = lambdaDeploymentConfigTemplates
case model.ApplicationKind_CLOUDRUN:
templates = cloudrunDeploymentConfigTemplates
case model.ApplicationKind_ECS:
templates = ecsDeploymentConfigTemplates
default:
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Unknown application kind %v", app.Kind))
}
for _, t := range templates {
g := app.GetGitPath()
filename := g.ConfigFilename
if filename == "" {
filename = ".pipe.yaml"
}
t.FileCreationUrl, err = git.MakeFileCreationURL(g.Repo.Remote, g.Path, g.Repo.Branch, filename, t.Content)
if err != nil {
a.logger.Error("failed to make a link to create a file", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to make a link to create a file")
}
}
if len(req.Labels) == 0 {
return &webservice.ListDeploymentConfigTemplatesResponse{Templates: templates}, nil
}
filtered := filterDeploymentConfigTemplates(templates, req.Labels)
return &webservice.ListDeploymentConfigTemplatesResponse{Templates: filtered}, nil
}
// Returns the templates from the given list that carry all of the specified labels.
func filterDeploymentConfigTemplates(templates []*webservice.DeploymentConfigTemplate, labels []webservice.DeploymentConfigTemplateLabel) []*webservice.DeploymentConfigTemplate {
filtered := make([]*webservice.DeploymentConfigTemplate, 0, len(templates))
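// The "L" label lets the inner loop skip straight to the next template as soon as one required label is missing.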
L:
for _, template := range templates {
for _, l := range labels {
if !template.HasLabel(l) {
continue L
}
}
filtered = append(filtered, template)
}
return filtered
}
func (a *WebAPI) GenerateAPIKey(ctx context.Context, req *webservice.GenerateAPIKeyRequest) (*webservice.GenerateAPIKeyResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
id := uuid.New().String()
key, hash, err := model.GenerateAPIKey(id)
if err != nil {
a.logger.Error("failed to generate API key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate API key")
}
apiKey := model.APIKey{
Id: id,
Name: req.Name,
KeyHash: hash,
ProjectId: claims.Role.ProjectId,
Role: req.Role,
Creator: claims.Subject,
}
err = a.apiKeyStore.AddAPIKey(ctx, &apiKey)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The API key already exists")
}
if err != nil {
a.logger.Error("failed to create API key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create API key")
}
return &webservice.GenerateAPIKeyResponse{
Key: key,
}, nil
}
func (a *WebAPI) DisableAPIKey(ctx context.Context, req *webservice.DisableAPIKeyRequest) (*webservice.DisableAPIKeyResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.apiKeyStore.DisableAPIKey(ctx, req.Id, claims.Role.ProjectId); err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "The API key is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to disable the API key",
zap.String("apikey-id", req.Id),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to disable the API key")
}
}
return &webservice.DisableAPIKeyResponse{}, nil
}
func (a *WebAPI) ListAPIKeys(ctx context.Context, req *webservice.ListAPIKeysRequest) (*webservice.ListAPIKeysResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
},
}
if req.Options != nil {
if req.Options.Enabled != nil {
opts.Filters = append(opts.Filters, datastore.ListFilter{
Field: "Disabled",
Operator: datastore.OperatorEqual,
Value: !req.Options.Enabled.GetValue(),
})
}
}
apiKeys, err := a.apiKeyStore.ListAPIKeys(ctx, opts)
if err != nil {
a.logger.Error("failed to list API keys", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to list API keys")
}
// Redact all sensitive data inside API key before sending to the client.
for i := range apiKeys {
apiKeys[i].RedactSensitiveData()
}
return &webservice.ListAPIKeysResponse{
Keys: apiKeys,
}, nil
}
// GetInsightData returns the accumulated insight data.
func (a *WebAPI) GetInsightData(ctx context.Context, req *webservice.GetInsightDataRequest) (*webservice.GetInsightDataResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
count := int(req.DataPointCount)
from := time.Unix(req.RangeFrom, 0)
chunks, err := insightstore.LoadChunksFromCache(a.insightCache, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count)
if err != nil {
a.logger.Error("failed to load chunks from cache", zap.Error(err))
chunks, err = a.insightStore.LoadChunks(ctx, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count)
if err != nil {
a.logger.Error("failed to load chunks from insightstore", zap.Error(err))
return nil, err
}
if err := insightstore.PutChunksToCache(a.insightCache, chunks); err != nil {
a.logger.Error("failed to put chunks to cache", zap.Error(err))
}
}
idp, err := chunks.ExtractDataPoints(req.Step, from, count)
if err != nil {
a.logger.Error("failed to extract data points from chunks", zap.Error(err))
}
var updateAt int64
for _, c := range chunks {
accumulatedTo := c.GetAccumulatedTo()
if accumulatedTo > updateAt {
updateAt = accumulatedTo
}
}
return &webservice.GetInsightDataResponse{
UpdatedAt: updateAt,
DataPoints: idp,
Type: model.InsightResultType_MATRIX,
Matrix: []*model.InsightSampleStream{
{
DataPoints: idp,
},
},
}, nil
}
func (a *WebAPI) GetInsightApplicationCount(ctx context.Context, req *webservice.GetInsightApplicationCountRequest) (*webservice.GetInsightApplicationCountResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
// TODO: Cache application counts in the cache service.
c, err := a.insightStore.LoadApplicationCounts(ctx, claims.Role.ProjectId)
if err != nil {
if err == filestore.ErrNotFound {
return nil, status.Error(codes.NotFound, "Not found")
}
a.logger.Error("failed to load application counts", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to load application counts")
}
counts := make([]*model.InsightApplicationCount, 0, len(c.Counts))
for i := range c.Counts {
counts = append(counts, &c.Counts[i])
}
return &webservice.GetInsightApplicationCountResponse{
Counts: counts,
UpdatedAt: c.UpdatedAt,
}, nil
}
| 1 | 20,888 | If you think about it, the status should be PermissionDenied? | pipe-cd-pipe | go |
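The review comment above argues that a cross-project access failure should surface as PermissionDenied. As a rough, hypothetical sketch only (the actual change under review is not reproduced here), such a check could mirror validatePipedBelongsToProject and reuse the grpc status/codes packages the handlers already import; the helper name ensureSameProject is illustrative and not part of the codebase:

// Hypothetical helper: report ownership mismatches as PermissionDenied rather than InvalidArgument.
func ensureSameProject(resourceProjectID, callerProjectID, resource string) error {
	if resourceProjectID == callerProjectID {
		return nil
	}
	return status.Errorf(codes.PermissionDenied, "Requested %s doesn't belong to the project you logged in", resource)
}

Handlers such as AddApplication could then call a helper like this instead of returning codes.InvalidArgument when piped.ProjectId differs from the caller's project.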
@@ -260,8 +260,7 @@ class MappingJobQueue(threading.Thread):
low_quality_rate=stats["low_quality"] - stats["last_low_quality"],
no_match_rate=stats["no_match"] - stats["last_no_match"],
listens_per_sec=listens_per_sec,
- listen_count=stats["listen_count"],
- listens_matched=stats["listens_matched"],
+ listens_matched_p=stats["listens_matched"] / stats["listen_count"] * 100.0,
legacy_index_date=datetime.date.fromtimestamp(self.legacy_listens_index_date).strftime("%Y-%m-%d"))
stats["last_exact_match"] = stats["exact_match"] | 1 | from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED
from dataclasses import dataclass, field
import datetime
from queue import PriorityQueue, Queue, Empty
from typing import Any
from time import monotonic, sleep
import threading
import traceback
from io import StringIO
from flask import current_app
import sqlalchemy
from listenbrainz.listen import Listen
from listenbrainz.db import timescale
from listenbrainz.mbid_mapping_writer.matcher import process_listens
from listenbrainz.labs_api.labs.api.mbid_mapping import MATCH_TYPES
from listenbrainz.utils import init_cache
from listenbrainz.listenstore.timescale_listenstore import DATA_START_YEAR_IN_SECONDS
from listenbrainz import messybrainz as msb_db
from brainzutils import metrics, cache
MAX_THREADS = 3
MAX_QUEUED_JOBS = MAX_THREADS * 2
QUEUE_RELOAD_THRESHOLD = 0
UPDATE_INTERVAL = 30
LEGACY_LISTEN = 1
NEW_LISTEN = 0
# How long to wait if all unmatched listens have been processed before starting the process anew
UNMATCHED_LISTENS_COMPLETED_TIMEOUT = 86400 # in s
# This is the point where the legacy listens should be processed from
LEGACY_LISTENS_LOAD_WINDOW = 86400 * 3 # load 3 days of data per go
LEGACY_LISTENS_INDEX_DATE_CACHE_KEY = "mbid.legacy_index_date"
# How many listens should be re-checked every mapping pass?
NUM_ITEMS_TO_RECHECK_PER_PASS = 100000
# When looking for mapped items marked for re-checking, use this batch size
RECHECK_BATCH_SIZE = 5000
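# JobItem ordering only considers the priority value; the PriorityQueue pops the lowest value first, so NEW_LISTEN (0) jobs are processed before LEGACY_LISTEN (1) jobs.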
@dataclass(order=True)
class JobItem:
priority: int
item: Any = field(compare=False)
def _add_legacy_listens_to_queue(obj):
return obj.add_legacy_listens_to_queue()
class MappingJobQueue(threading.Thread):
""" This class coordinates incoming listens and legacy listens, giving
priority to new and incoming listens. Threads are fired off as needed
looking up jobs in the background and then this main matcher
thread can deal with the statstics and reporting"""
def __init__(self, app):
threading.Thread.__init__(self)
self.done = False
self.app = app
self.queue = PriorityQueue()
self.unmatched_listens_complete_time = 0
self.legacy_load_thread = None
self.legacy_next_run = 0
self.legacy_listens_index_date = 0
self.num_legacy_listens_loaded = 0
self.last_processed = 0
init_cache(host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'],
namespace=app.config['REDIS_NAMESPACE'])
metrics.init("listenbrainz")
self.load_legacy_listens()
def add_new_listens(self, listens):
self.queue.put(JobItem(NEW_LISTEN, listens))
def terminate(self):
self.done = True
self.join()
def mark_oldest_no_match_entries_as_stale(self):
"""
THIS FUNCTION IS CURRENTLY UNUSED, BUT WILL BE USED LATER.
"""
query = """UPDATE mbid_mapping
SET last_updated = '1970-01-01'
WHERE match_type = 'no_match'
AND last_updated >= (SELECT last_updated
FROM mbid_mapping
WHERE match_type = 'no_match'
ORDER BY last_updated
OFFSET %s
LIMIT 1);"""
args = (NUM_ITEMS_TO_RECHECK_PER_PASS,)
def load_legacy_listens(self):
""" This function should kick off a thread to load more legacy listens if called.
It may be called multiple times, so it must guard against firing off multiple
threads. """
if self.legacy_load_thread or (self.legacy_next_run and self.legacy_next_run > monotonic()):
return
self.legacy_load_thread = threading.Thread(
target=_add_legacy_listens_to_queue, args=(self,))
self.legacy_load_thread.start()
def fetch_and_queue_listens(self, query, args):
""" Fetch and queue legacy and recheck listens """
msb_query = """SELECT gid AS recording_msid
, rj.data->>'title' AS track_name
, rj.data->>'artist' AS artist_name
FROM recording r
JOIN recording_json rj
ON r.data = rj.id
WHERE gid in :msids"""
count = 0
msids = []
with timescale.engine.connect() as connection:
curs = connection.execute(sqlalchemy.text(query), args)
for row in curs.fetchall():
msids.append(row["recording_msid"])
if len(msids) == 0:
return 0
with msb_db.engine.connect() as connection:
curs = connection.execute(sqlalchemy.text(msb_query), msids=tuple(msids))
while True:
result = curs.fetchone()
if not result:
break
self.queue.put(JobItem(LEGACY_LISTEN, [{"data": {"artist_name": result[2],
"track_name": result[1]},
"recording_msid": result[0],
"legacy": True}]))
count += 1
return count
def add_legacy_listens_to_queue(self):
"""Fetch more legacy listens from the listens table by doing an left join
on the matched listens, finding the next chunk of legacy listens to look up.
Listens are added to the queue with a low priority."""
# Find listens that have no entry in the mapping yet.
legacy_query = """SELECT data->'track_metadata'->'additional_info'->>'recording_msid'::TEXT AS recording_msid
FROM listen
LEFT JOIN mbid_mapping m
ON data->'track_metadata'->'additional_info'->>'recording_msid' = m.recording_msid::text
WHERE m.recording_msid IS NULL
AND listened_at <= :max_ts
AND listened_at > :min_ts"""
# Find mapping rows that need to be rechecked
recheck_query = """SELECT recording_msid
FROM mbid_mapping
WHERE last_updated = '1970-01-01'
LIMIT %d""" % RECHECK_BATCH_SIZE
# Check to see where we need to pick up from, or start new
if not self.legacy_listens_index_date:
dt = cache.get(LEGACY_LISTENS_INDEX_DATE_CACHE_KEY, decode=False) or b""
try:
self.legacy_listens_index_date = int(
datetime.datetime.strptime(str(dt, "utf-8"), "%Y-%m-%d").timestamp())
self.app.logger.info("Loaded date index from cache: %d %s" % (
self.legacy_listens_index_date, str(dt)))
except ValueError:
self.legacy_listens_index_date = int(
datetime.datetime.now().timestamp())
self.app.logger.info("Use date index now()")
# Check to see if we're done
if self.legacy_listens_index_date < DATA_START_YEAR_IN_SECONDS - LEGACY_LISTENS_LOAD_WINDOW:
self.app.logger.info(
"Finished looking up all legacy listens! Wooo!")
self.legacy_next_run = monotonic() + UNMATCHED_LISTENS_COMPLETED_TIMEOUT
self.legacy_listens_index_date = int(datetime.datetime.now().timestamp())
self.num_legacy_listens_loaded = 0
dt = datetime.datetime.fromtimestamp(self.legacy_listens_index_date)
cache.set(LEGACY_LISTENS_INDEX_DATE_CACHE_KEY, dt.strftime("%Y-%m-%d"), expirein=0, encode=False)
return
# Check to see if any listens have been marked for re-check
count = self.fetch_and_queue_listens(recheck_query, {})
if count > 0:
self.app.logger.info("Loaded %d listens to be rechecked." % count)
return
else:
# If none, check for old legacy listens
count = self.fetch_and_queue_listens(legacy_query, {"max_ts": self.legacy_listens_index_date,
"min_ts": self.legacy_listens_index_date - LEGACY_LISTENS_LOAD_WINDOW})
self.app.logger.info("Loaded %s more legacy listens for %s" % (count, datetime.datetime.fromtimestamp(
self.legacy_listens_index_date).strftime("%Y-%m-%d")))
# update cache entry and count
self.legacy_listens_index_date -= LEGACY_LISTENS_LOAD_WINDOW
dt = datetime.datetime.fromtimestamp(self.legacy_listens_index_date)
cache.set(LEGACY_LISTENS_INDEX_DATE_CACHE_KEY, dt.strftime("%Y-%m-%d"), expirein=0, encode=False)
self.num_legacy_listens_loaded = count
def update_metrics(self, stats):
""" Calculate stats and print status to stdout and report metrics."""
if stats["total"] != 0:
if self.last_processed:
listens_per_sec = int(
(stats["processed"] - self.last_processed) / UPDATE_INTERVAL)
else:
listens_per_sec = 0
self.last_processed = stats["processed"]
percent = (stats["exact_match"] + stats["high_quality"] + stats["med_quality"] +
stats["low_quality"]) / stats["total"] * 100.00
self.app.logger.info("total %d matched %d/%d legacy: %d queue: %d %d l/s" %
(stats["total"],
stats["exact_match"] + stats["high_quality"] + stats["med_quality"] + stats["low_quality"],
stats["no_match"],
stats["legacy"],
self.queue.qsize(),
listens_per_sec))
if stats["last_exact_match"] is None:
stats["last_exact_match"] = stats["exact_match"]
stats["last_high_quality"] = stats["high_quality"]
stats["last_med_quality"] = stats["med_quality"]
stats["last_low_quality"] = stats["low_quality"]
stats["last_no_match"] = stats["no_match"]
metrics.set("listenbrainz-mbid-mapping-writer",
total_match_p=percent,
exact_match_p=stats["exact_match"] /
stats["total"] * 100.00,
high_quality_p=stats["high_quality"] /
stats["total"] * 100.00,
med_quality_p=stats["med_quality"] /
stats["total"] * 100.00,
low_quality_p=stats["low_quality"] /
stats["total"] * 100.00,
no_match_p=stats["no_match"] / stats["total"] * 100.00,
errors_p=stats["errors"] / stats["total"] * 100.00,
total_listens=stats["total"],
exact_match=stats["exact_match"],
high_quality=stats["high_quality"],
med_quality=stats["med_quality"],
low_quality=stats["low_quality"],
no_match=stats["no_match"],
errors=stats["errors"],
qsize=self.queue.qsize(),
exact_match_rate=stats["exact_match"] - stats["last_exact_match"],
high_quality_rate=stats["high_quality"] - stats["last_high_quality"],
med_quality_rate=stats["med_quality"] - stats["last_med_quality"],
low_quality_rate=stats["low_quality"] - stats["last_low_quality"],
no_match_rate=stats["no_match"] - stats["last_no_match"],
listens_per_sec=listens_per_sec,
listen_count=stats["listen_count"],
listens_matched=stats["listens_matched"],
legacy_index_date=datetime.date.fromtimestamp(self.legacy_listens_index_date).strftime("%Y-%m-%d"))
stats["last_exact_match"] = stats["exact_match"]
stats["last_high_quality"] = stats["high_quality"]
stats["last_med_quality"] = stats["med_quality"]
stats["last_low_quality"] = stats["low_quality"]
stats["last_no_match"] = stats["no_match"]
def run(self):
""" main thread entry point"""
stats = {"processed": 0,
"total": 0,
"errors": 0,
"listen_count": 0,
"listens_matched": 0,
"legacy": 0,
"legacy_match": 0,
"last_exact_match": None,
"last_high_quality": None,
"last_med_quality": None,
"last_low_quality": None,
"last_no_match": None}
for typ in MATCH_TYPES:
stats[typ] = 0
# Fetch stats of how many items have already been matched.
with timescale.engine.connect() as connection:
query = """SELECT COUNT(*), match_type
FROM mbid_mapping
GROUP BY match_type"""
curs = connection.execute(query)
while True:
result = curs.fetchone()
if not result:
break
stats[result[1]] = result[0]
query = """SELECT COUNT(*)
FROM mbid_mapping_metadata"""
curs = connection.execute(query)
while True:
result = curs.fetchone()
if not result:
break
stats["processed"] = result[0]
stats["total"] = result[0]
# the main thread loop
update_time = monotonic() + UPDATE_INTERVAL
try:
with self.app.app_context():
with ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
futures = {}
while not self.done:
completed, uncompleted = wait(
futures, return_when=FIRST_COMPLETED)
# Check for completed threads and reports errors if any occurred
for complete in completed:
exc = complete.exception()
if exc:
self.app.logger.error("Error in listen mbid mapping writer:", exc_info=exc)
stats["errors"] += 1
else:
job_stats = complete.result()
for stat in job_stats or []:
stats[stat] += job_stats[stat]
del futures[complete]
# Check to see if more legacy listens need to be loaded
for i in range(MAX_QUEUED_JOBS - len(uncompleted)):
try:
job = self.queue.get(False)
except Empty:
sleep(.1)
continue
futures[executor.submit(
process_listens, self.app, job.item, job.priority == LEGACY_LISTEN)] = job.priority
if job.priority == LEGACY_LISTEN:
stats["legacy"] += 1
if self.legacy_load_thread and not self.legacy_load_thread.is_alive():
self.legacy_load_thread = None
if self.queue.qsize() == 0:
self.load_legacy_listens()
if monotonic() > update_time:
update_time = monotonic() + UPDATE_INTERVAL
self.update_metrics(stats)
except Exception as err:
self.app.logger.info(traceback.format_exc())
self.app.logger.info("job queue thread finished")
| 1 | 19,788 | Calculation looks good but don't know what format etc grafana expects this to be in. | metabrainz-listenbrainz-server | py |
@@ -722,6 +722,18 @@ class TestDockerClient:
assert env_variable in stdout
assert "EXISTING_VAR=test_var" in stdout
+ def test_run_with_additional_arguments_add_host(self, docker_client: ContainerClient):
+ additional_flags = "--add-host sometest.localstack.cloud:127.0.0.1"
+ stdout, _ = docker_client.run_container(
+ "alpine",
+ remove=True,
+ command=["getent", "hosts", "sometest.localstack.cloud"],
+ additional_flags=additional_flags,
+ )
+ stdout = stdout.decode(config.DEFAULT_ENCODING)
+ assert "127.0.0.1" in stdout
+ assert "sometest.localstack.cloud" in stdout
+
def test_get_container_ip_non_existing_container(self, docker_client: ContainerClient):
with pytest.raises(NoSuchContainer):
docker_client.get_container_ip("hopefully_non_existent_container_%s" % short_uid()) | 1 | import logging
import re
import time
from subprocess import CalledProcessError
from typing import NamedTuple
import pytest
from localstack import config
from localstack.utils.common import safe_run, short_uid
from localstack.utils.docker import (
ContainerClient,
ContainerException,
DockerContainerStatus,
NoSuchContainer,
NoSuchImage,
PortMappings,
Util,
)
ContainerInfo = NamedTuple(
"ContainerInfo",
[
("container_id", str),
("container_name", str),
],
)
LOG = logging.getLogger(__name__)
container_name_prefix = "lst_test_"
def _random_container_name() -> str:
return f"{container_name_prefix}{short_uid()}"
@pytest.fixture
def dummy_container(create_container):
"""Returns a container that is created but not started"""
return create_container("alpine", command=["sh", "-c", "while true; do sleep 1; done"])
@pytest.fixture
def create_container(docker_client: ContainerClient):
"""
Uses the factory as fixture pattern to wrap ContainerClient.create_container as a factory that
removes the containers after the fixture is cleaned up.
"""
containers = list()
def _create_container(image_name: str, **kwargs):
kwargs["name"] = kwargs.get("name", _random_container_name())
cid = docker_client.create_container(image_name, **kwargs)
cid = cid.strip()
containers.append(cid)
return ContainerInfo(cid, kwargs["name"]) # FIXME name should come from docker_client
yield _create_container
for c in containers:
try:
docker_client.remove_container(c)
except Exception:
LOG.warning("failed to remove test container %s", c)
class TestDockerClient:
def test_container_lifecycle_commands(self, docker_client: ContainerClient):
container_name = _random_container_name()
output = docker_client.create_container(
"alpine",
name=container_name,
command=["sh", "-c", "for i in `seq 30`; do sleep 1; echo $i; done"],
)
container_id = output.strip()
assert container_id
try:
docker_client.start_container(container_id)
assert DockerContainerStatus.UP == docker_client.get_container_status(container_name)
docker_client.stop_container(container_id)
assert DockerContainerStatus.DOWN == docker_client.get_container_status(container_name)
finally:
docker_client.remove_container(container_id)
assert DockerContainerStatus.NON_EXISTENT == docker_client.get_container_status(
container_name
)
def test_create_container_remove_removes_container(
self, docker_client: ContainerClient, create_container
):
info = create_container("alpine", remove=True, command=["echo", "foobar"])
# make sure it was correctly created
assert 1 == len(docker_client.list_containers(f"id={info.container_id}"))
# start the container
output, _ = docker_client.start_container(info.container_id, attach=True)
output = output.decode(config.DEFAULT_ENCODING)
time.sleep(1) # give the docker daemon some time to remove the container after execution
assert 0 == len(docker_client.list_containers(f"id={info.container_id}"))
# it takes a while for it to be removed
assert "foobar" in output
def test_create_container_non_existing_image(self, docker_client: ContainerClient):
with pytest.raises(NoSuchImage):
docker_client.create_container("this_image_does_hopefully_not_exist_42069")
def test_exec_in_container(
self, docker_client: ContainerClient, dummy_container: ContainerInfo
):
docker_client.start_container(dummy_container.container_id)
output, _ = docker_client.exec_in_container(
dummy_container.container_id, command=["echo", "foobar"]
)
output = output.decode(config.DEFAULT_ENCODING)
assert "foobar" == output.strip()
def test_exec_in_container_not_running_raises_exception(
self, docker_client: ContainerClient, dummy_container
):
with pytest.raises(ContainerException):
# can't exec into a non-running container
docker_client.exec_in_container(
dummy_container.container_id, command=["echo", "foobar"]
)
def test_exec_in_container_with_env(self, docker_client: ContainerClient, dummy_container):
docker_client.start_container(dummy_container.container_id)
env = {"MYVAR": "foo_var"}
output, _ = docker_client.exec_in_container(
dummy_container.container_id, env_vars=env, command=["env"]
)
output = output.decode(config.DEFAULT_ENCODING)
assert "MYVAR=foo_var" in output
def test_exec_error_in_container(self, docker_client: ContainerClient, dummy_container):
docker_client.start_container(dummy_container.container_id)
with pytest.raises(ContainerException) as ex:
docker_client.exec_in_container(
dummy_container.container_id, command=["./doesnotexist"]
)
assert ex.match("doesnotexist: no such file or directory")
def test_create_container_with_max_env_vars(
self, docker_client: ContainerClient, create_container
):
# default ARG_MAX=131072 in Docker
env = dict([(f"IVAR_{i:05d}", f"VAL_{i:05d}") for i in range(2000)])
# make sure we're really triggering the relevant code
assert len(str(dict(env))) >= Util.MAX_ENV_ARGS_LENGTH
info = create_container("alpine", env_vars=env, command=["env"])
output, _ = docker_client.start_container(info.container_id, attach=True)
output = output.decode(config.DEFAULT_ENCODING)
assert "IVAR_00001=VAL_00001" in output
assert "IVAR_01000=VAL_01000" in output
assert "IVAR_01999=VAL_01999" in output
def test_run_container(self, docker_client: ContainerClient):
container_name = _random_container_name()
try:
output, _ = docker_client.run_container(
"alpine",
name=container_name,
command=["echo", "foobared"],
)
output = output.decode(config.DEFAULT_ENCODING)
assert "foobared" in output
finally:
docker_client.remove_container(container_name)
def test_run_container_error(self, docker_client: ContainerClient):
container_name = _random_container_name()
try:
with pytest.raises(ContainerException):
docker_client.run_container(
"alpine",
name=container_name,
command=["./doesnotexist"],
)
finally:
docker_client.remove_container(container_name)
def test_stop_non_existing_container(self, docker_client: ContainerClient):
with pytest.raises(NoSuchContainer):
docker_client.stop_container("this_container_does_not_exist")
def test_remove_non_existing_container(self, docker_client: ContainerClient):
with pytest.raises(NoSuchContainer):
docker_client.remove_container("this_container_does_not_exist", force=False)
def test_start_non_existing_container(self, docker_client: ContainerClient):
with pytest.raises(NoSuchContainer):
docker_client.start_container("this_container_does_not_exist")
def test_get_network(self, docker_client: ContainerClient, dummy_container):
n = docker_client.get_network(dummy_container.container_name)
assert "default" == n
def test_create_with_host_network(self, docker_client: ContainerClient, create_container):
info = create_container("alpine", network="host")
network = docker_client.get_network(info.container_name)
assert "host" == network
def test_create_with_port_mapping(self, docker_client: ContainerClient, create_container):
ports = PortMappings()
ports.add(45122, 22)
ports.add(45180, 80)
create_container("alpine", ports=ports)
def test_create_with_volume(self, tmpdir, docker_client: ContainerClient, create_container):
mount_volumes = [(tmpdir.realpath(), "/tmp/mypath")]
c = create_container(
"alpine",
command=["sh", "-c", "echo 'foobar' > /tmp/mypath/foo.log"],
mount_volumes=mount_volumes,
)
docker_client.start_container(c.container_id)
assert tmpdir.join("foo.log").isfile(), "foo.log was not created in mounted dir"
def test_copy_into_container(self, tmpdir, docker_client: ContainerClient, create_container):
local_path = tmpdir.join("myfile.txt")
container_path = "/tmp/myfile_differentpath.txt"
self._test_copy_into_container(
docker_client,
create_container,
["cat", container_path],
local_path,
local_path,
container_path,
)
def test_copy_into_non_existent_container(self, tmpdir, docker_client: ContainerClient):
local_path = tmpdir.mkdir("test_dir")
file_path = local_path.join("test_file")
with file_path.open(mode="w") as fd:
fd.write("foobared\n")
with pytest.raises(NoSuchContainer):
docker_client.copy_into_container(
"hopefully_non_existent_container_%s" % short_uid(), str(file_path), "test_file"
)
def test_copy_into_container_without_target_filename(
self, tmpdir, docker_client: ContainerClient, create_container
):
local_path = tmpdir.join("myfile.txt")
container_path = "/tmp"
self._test_copy_into_container(
docker_client,
create_container,
["cat", "/tmp/myfile.txt"],
local_path,
local_path,
container_path,
)
def test_copy_directory_into_container(
self, tmpdir, docker_client: ContainerClient, create_container
):
local_path = tmpdir.join("fancy_folder")
local_path.mkdir()
file_path = local_path.join("myfile.txt")
container_path = "/tmp/fancy_other_folder"
self._test_copy_into_container(
docker_client,
create_container,
["cat", "/tmp/fancy_other_folder/myfile.txt"],
file_path,
local_path,
container_path,
)
def _test_copy_into_container(
self, docker_client, create_container, command, file_path, local_path, container_path
):
c = create_container("alpine", command=command)
with file_path.open(mode="w") as fd:
fd.write("foobared\n")
docker_client.copy_into_container(c.container_name, str(local_path), container_path)
output, _ = docker_client.start_container(c.container_id, attach=True)
output = output.decode(config.DEFAULT_ENCODING)
assert "foobared" in output
def test_copy_into_container_with_existing_target(
self, tmpdir, docker_client: ContainerClient, dummy_container
):
local_path = tmpdir.join("myfile.txt")
container_path = "/tmp/myfile.txt"
with local_path.open(mode="w") as fd:
fd.write("foo\n")
docker_client.start_container(dummy_container.container_id)
docker_client.exec_in_container(
dummy_container.container_id, command=["sh", "-c", f"echo bar > {container_path}"]
)
out, _ = docker_client.exec_in_container(
dummy_container.container_id,
command=[
"cat",
"/tmp/myfile.txt",
],
)
assert "bar" in out.decode(config.DEFAULT_ENCODING)
docker_client.copy_into_container(
dummy_container.container_id, str(local_path), container_path
)
out, _ = docker_client.exec_in_container(
dummy_container.container_id,
command=[
"cat",
"/tmp/myfile.txt",
],
)
assert "foo" in out.decode(config.DEFAULT_ENCODING)
def test_copy_directory_content_into_container(
self, tmpdir, docker_client: ContainerClient, dummy_container
):
local_path = tmpdir.join("fancy_folder")
local_path.mkdir()
file_path = local_path.join("myfile.txt")
with file_path.open(mode="w") as fd:
fd.write("foo\n")
file_path = local_path.join("myfile2.txt")
with file_path.open(mode="w") as fd:
fd.write("bar\n")
container_path = "/tmp/fancy_other_folder"
docker_client.start_container(dummy_container.container_id)
docker_client.exec_in_container(
dummy_container.container_id, command=["mkdir", "-p", container_path]
)
docker_client.copy_into_container(
dummy_container.container_id, f"{str(local_path)}/.", container_path
)
out, _ = docker_client.exec_in_container(
dummy_container.container_id,
command=[
"cat",
"/tmp/fancy_other_folder/myfile.txt",
"/tmp/fancy_other_folder/myfile2.txt",
],
)
assert "foo" in out.decode(config.DEFAULT_ENCODING)
assert "bar" in out.decode(config.DEFAULT_ENCODING)
def test_get_network_non_existing_container(self, docker_client: ContainerClient):
with pytest.raises(ContainerException):
docker_client.get_network("this_container_does_not_exist")
def test_list_containers(self, docker_client: ContainerClient, create_container):
c1 = create_container("alpine", command=["echo", "1"])
c2 = create_container("alpine", command=["echo", "2"])
c3 = create_container("alpine", command=["echo", "3"])
container_list = docker_client.list_containers()
assert len(container_list) >= 3
image_names = [info["name"] for info in container_list]
assert c1.container_name in image_names
assert c2.container_name in image_names
assert c3.container_name in image_names
def test_list_containers_filter_non_existing(self, docker_client: ContainerClient):
container_list = docker_client.list_containers(filter="id=DOES_NOT_EXST")
assert 0 == len(container_list)
def test_list_containers_filter_illegal_filter(self, docker_client: ContainerClient):
with pytest.raises(ContainerException):
docker_client.list_containers(filter="illegalfilter=foobar")
def test_list_containers_filter(self, docker_client: ContainerClient, create_container):
name_prefix = "filter_tests_"
cn1 = name_prefix + _random_container_name()
cn2 = name_prefix + _random_container_name()
cn3 = name_prefix + _random_container_name()
c1 = create_container("alpine", name=cn1, command=["echo", "1"])
c2 = create_container("alpine", name=cn2, command=["echo", "2"])
c3 = create_container("alpine", name=cn3, command=["echo", "3"])
# per id
container_list = docker_client.list_containers(filter=f"id={c2.container_id}")
assert 1 == len(container_list)
assert c2.container_id.startswith(container_list[0]["id"])
assert c2.container_name == container_list[0]["name"]
assert "created" == container_list[0]["status"]
# per name pattern
container_list = docker_client.list_containers(filter=f"name={name_prefix}")
assert 3 == len(container_list)
image_names = [info["name"] for info in container_list]
assert c1.container_name in image_names
assert c2.container_name in image_names
assert c3.container_name in image_names
# multiple patterns
container_list = docker_client.list_containers(
filter=[
f"id={c1.container_id}",
f"name={container_name_prefix}",
]
)
assert 1 == len(container_list)
assert c1.container_name == container_list[0]["name"]
def test_get_container_entrypoint(self, docker_client: ContainerClient):
entrypoint = docker_client.get_image_entrypoint("alpine")
assert "" == entrypoint
def test_get_container_entrypoint_non_existing_image(self, docker_client: ContainerClient):
with pytest.raises(NoSuchImage):
docker_client.get_image_entrypoint("thisdoesnotexist")
def test_get_container_command(self, docker_client: ContainerClient):
command = docker_client.get_image_cmd("alpine")
assert "/bin/sh" == command
def test_get_container_command_non_existing_image(self, docker_client: ContainerClient):
with pytest.raises(NoSuchImage):
docker_client.get_image_cmd("thisdoesnotexist")
def test_create_start_container_with_stdin_to_stdout(self, docker_client: ContainerClient):
container_name = _random_container_name()
message = "test_message_stdin"
try:
docker_client.create_container(
"alpine",
name=container_name,
interactive=True,
command=["cat"],
)
output, _ = docker_client.start_container(
container_name, interactive=True, stdin=message.encode(config.DEFAULT_ENCODING)
)
assert message == output.decode(config.DEFAULT_ENCODING).strip()
finally:
docker_client.remove_container(container_name)
def test_create_start_container_with_stdin_to_file(
self, tmpdir, docker_client: ContainerClient
):
container_name = _random_container_name()
message = "test_message_stdin"
try:
docker_client.create_container(
"alpine",
name=container_name,
interactive=True,
command=["sh", "-c", "cat > test_file"],
)
output, _ = docker_client.start_container(
container_name, interactive=True, stdin=message.encode(config.DEFAULT_ENCODING)
)
target_path = tmpdir.join("test_file")
docker_client.copy_from_container(container_name, str(target_path), "test_file")
assert message == target_path.read().strip()
finally:
docker_client.remove_container(container_name)
def test_run_container_with_stdin(self, docker_client: ContainerClient):
container_name = _random_container_name()
message = "test_message_stdin"
try:
output, _ = docker_client.run_container(
"alpine",
name=container_name,
interactive=True,
stdin=message.encode(config.DEFAULT_ENCODING),
command=["cat"],
)
assert message == output.decode(config.DEFAULT_ENCODING).strip()
finally:
docker_client.remove_container(container_name)
def test_exec_in_container_with_stdin(self, docker_client: ContainerClient, dummy_container):
docker_client.start_container(dummy_container.container_id)
message = "test_message_stdin"
output, _ = docker_client.exec_in_container(
dummy_container.container_id,
interactive=True,
stdin=message.encode(config.DEFAULT_ENCODING),
command=["cat"],
)
assert message == output.decode(config.DEFAULT_ENCODING).strip()
def test_exec_in_container_with_stdin_stdout_stderr(
self, docker_client: ContainerClient, dummy_container
):
docker_client.start_container(dummy_container.container_id)
message = "test_message_stdin"
output, stderr = docker_client.exec_in_container(
dummy_container.container_id,
interactive=True,
stdin=message.encode(config.DEFAULT_ENCODING),
command=["sh", "-c", "cat; >&2 echo stderrtest"],
)
assert message == output.decode(config.DEFAULT_ENCODING).strip()
assert "stderrtest" == stderr.decode(config.DEFAULT_ENCODING).strip()
def test_run_detached_with_logs(self, docker_client: ContainerClient):
container_name = _random_container_name()
message = "test_message"
try:
output, _ = docker_client.run_container(
"alpine",
name=container_name,
detach=True,
command=["echo", message],
)
container_id = output.decode(config.DEFAULT_ENCODING).strip()
logs = docker_client.get_container_logs(container_id)
assert message == logs.strip()
finally:
docker_client.remove_container(container_name)
def test_get_logs_non_existent_container(self, docker_client: ContainerClient):
with pytest.raises(NoSuchContainer):
docker_client.get_container_logs("container_hopefully_does_not_exist", safe=False)
assert "" == docker_client.get_container_logs(
"container_hopefully_does_not_exist", safe=True
)
def test_pull_docker_image(self, docker_client: ContainerClient):
try:
docker_client.get_image_cmd("alpine")
safe_run([config.DOCKER_CMD, "rmi", "alpine"])
except ContainerException:
pass
with pytest.raises(NoSuchImage):
docker_client.get_image_cmd("alpine")
docker_client.pull_image("alpine")
assert "/bin/sh" == docker_client.get_image_cmd("alpine").strip()
def test_pull_non_existent_docker_image(self, docker_client: ContainerClient):
with pytest.raises(NoSuchImage):
docker_client.pull_image("localstack_non_existing_image_for_tests")
def test_run_container_automatic_pull(self, docker_client: ContainerClient):
try:
safe_run([config.DOCKER_CMD, "rmi", "alpine"])
except CalledProcessError:
pass
message = "test message"
stdout, _ = docker_client.run_container("alpine", command=["echo", message], remove=True)
assert message == stdout.decode(config.DEFAULT_ENCODING).strip()
def test_run_container_non_existent_image(self, docker_client: ContainerClient):
try:
safe_run([config.DOCKER_CMD, "rmi", "alpine"])
except CalledProcessError:
pass
with pytest.raises(NoSuchImage):
stdout, _ = docker_client.run_container(
"localstack_non_existing_image_for_tests", command=["echo", "test"], remove=True
)
def test_running_container_names(self, docker_client: ContainerClient, dummy_container):
docker_client.start_container(dummy_container.container_id)
name = dummy_container.container_name
assert name in docker_client.get_running_container_names()
docker_client.stop_container(name)
assert name not in docker_client.get_running_container_names()
def test_is_container_running(self, docker_client: ContainerClient, dummy_container):
docker_client.start_container(dummy_container.container_id)
name = dummy_container.container_name
assert docker_client.is_container_running(name)
docker_client.stop_container(name)
assert not docker_client.is_container_running(name)
def test_docker_image_names(self, docker_client: ContainerClient):
try:
safe_run([config.DOCKER_CMD, "rmi", "alpine"])
except CalledProcessError:
pass
assert "alpine:latest" not in docker_client.get_docker_image_names()
assert "alpine" not in docker_client.get_docker_image_names()
docker_client.pull_image("alpine")
assert "alpine:latest" in docker_client.get_docker_image_names()
assert "alpine:latest" not in docker_client.get_docker_image_names(include_tags=False)
assert "alpine" in docker_client.get_docker_image_names(include_tags=False)
assert "alpine" in docker_client.get_docker_image_names()
assert "alpine" not in docker_client.get_docker_image_names(strip_latest=False)
def test_get_container_name(self, docker_client: ContainerClient, dummy_container):
docker_client.start_container(dummy_container.container_id)
assert dummy_container.container_name == docker_client.get_container_name(
dummy_container.container_id
)
def test_get_container_name_not_existing(self, docker_client: ContainerClient):
not_existent_container = "not_existing_container"
with pytest.raises(NoSuchContainer):
docker_client.get_container_name(not_existent_container)
def test_get_container_id(self, docker_client: ContainerClient, dummy_container):
docker_client.start_container(dummy_container.container_id)
assert dummy_container.container_id == docker_client.get_container_id(
dummy_container.container_name
)
def test_get_container_id_not_existing(self, docker_client: ContainerClient):
not_existent_container = "not_existing_container"
with pytest.raises(NoSuchContainer):
docker_client.get_container_id(not_existent_container)
def test_inspect_container(self, docker_client: ContainerClient, dummy_container):
docker_client.start_container(dummy_container.container_id)
for identifier in [dummy_container.container_id, dummy_container.container_name]:
assert dummy_container.container_id == docker_client.inspect_container(identifier)["Id"]
assert (
f"/{dummy_container.container_name}"
== docker_client.inspect_container(identifier)["Name"]
)
def test_inspect_image(self, docker_client: ContainerClient):
docker_client.pull_image("alpine")
assert "alpine:latest" == docker_client.inspect_image("alpine")["RepoTags"][0]
def test_copy_from_container(self, tmpdir, docker_client: ContainerClient, dummy_container):
docker_client.start_container(dummy_container.container_id)
local_path = tmpdir.join("test_file")
self._test_copy_from_container(
local_path, local_path, "test_file", docker_client, dummy_container
)
def test_copy_from_container_to_different_file(
self, tmpdir, docker_client: ContainerClient, dummy_container
):
docker_client.start_container(dummy_container.container_id)
local_path = tmpdir.join("test_file_2")
self._test_copy_from_container(
local_path, local_path, "test_file", docker_client, dummy_container
)
def test_copy_from_container_into_directory(
self, tmpdir, docker_client: ContainerClient, dummy_container
):
docker_client.start_container(dummy_container.container_id)
local_path = tmpdir.mkdir("test_dir")
file_path = local_path.join("test_file")
self._test_copy_from_container(
local_path, file_path, "test_file", docker_client, dummy_container
)
def test_copy_from_non_existent_container(self, tmpdir, docker_client: ContainerClient):
local_path = tmpdir.mkdir("test_dir")
with pytest.raises(NoSuchContainer):
docker_client.copy_from_container(
"hopefully_non_existent_container_%s" % short_uid(), str(local_path), "test_file"
)
def _test_copy_from_container(
self,
local_path,
file_path,
container_file_name,
docker_client: ContainerClient,
dummy_container,
):
docker_client.exec_in_container(
dummy_container.container_id,
command=["sh", "-c", f"echo TEST_CONTENT > {container_file_name}"],
)
docker_client.copy_from_container(
dummy_container.container_id,
local_path=str(local_path),
container_path=container_file_name,
)
assert "TEST_CONTENT" == file_path.read().strip()
def test_run_with_additional_arguments(self, docker_client: ContainerClient):
env_variable = "TEST_FLAG=test_str"
stdout, _ = docker_client.run_container(
"alpine", remove=True, command=["env"], additional_flags=f"-e {env_variable}"
)
assert env_variable in stdout.decode(config.DEFAULT_ENCODING)
stdout, _ = docker_client.run_container(
"alpine",
remove=True,
command=["env"],
additional_flags=f"-e {env_variable}",
env_vars={"EXISTING_VAR": "test_var"},
)
stdout = stdout.decode(config.DEFAULT_ENCODING)
assert env_variable in stdout
assert "EXISTING_VAR=test_var" in stdout
def test_get_container_ip_non_existing_container(self, docker_client: ContainerClient):
with pytest.raises(NoSuchContainer):
docker_client.get_container_ip("hopefully_non_existent_container_%s" % short_uid())
def test_get_container_ip(self, docker_client: ContainerClient, dummy_container):
docker_client.start_container(dummy_container.container_id)
ip = docker_client.get_container_ip(dummy_container.container_id)
assert re.match(
r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
ip,
)
assert "127.0.0.1" != ip
| 1 | 13,058 | nit: We could use the `to_str(..)` util here. (let's not bother changing for this PR.. ) | localstack-localstack | py |
@@ -29,6 +29,7 @@ import (
"time"
"github.com/gogo/protobuf/types"
+ executiongenproto "github.com/temporalio/temporal/.gen/proto/execution"
tasklistpb "go.temporal.io/temporal-proto/tasklist"
)
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package tag
import (
"fmt"
"time"
"github.com/gogo/protobuf/types"
tasklistpb "go.temporal.io/temporal-proto/tasklist"
)
// All logging tags are defined in this file.
// To help find available tags, we recommend that all tags be categorized and placed in the corresponding section.
// We currently have those categories:
// 0. Common tags that can't be categorized (or belong to more than one)
// 1. Workflow: these tags are information that are useful to our customer, like workflow-id/run-id/task-list/...
// 2. System: these tags are internal information which usually cannot be understood by our customers.
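//
// A hypothetical usage sketch (the logger call below is an assumption for
// illustration only; it is not defined in this file):
//
//	logger.Info("decision timed out",
//		tag.WorkflowID(workflowID),
//		tag.WorkflowRunID(runID),
//		tag.WorkflowDecisionTimeoutSeconds(timeoutSeconds))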
/////////////////// Common tags defined here ///////////////////
// Error returns tag for Error
func Error(err error) Tag {
return newErrorTag("error", err)
}
// ClusterName returns tag for ClusterName
func ClusterName(clusterName string) Tag {
return newStringTag("cluster-name", clusterName)
}
// Timestamp returns tag for Timestamp
func Timestamp(timestamp time.Time) Tag {
return newTimeTag("timestamp", timestamp)
}
// TimestampProto returns tag for Timestamp
func TimestampProto(timestamp *types.Timestamp) Tag {
t, _ := types.TimestampFromProto(timestamp)
return newTimeTag("timestamp", t)
}
// TimestampInt returns tag for Timestamp
func TimestampInt(timestamp int64) Tag {
return newInt64("timestamp", timestamp)
}
/////////////////// Workflow tags defined here: ( wf is short for workflow) ///////////////////
// WorkflowAction returns tag for WorkflowAction
func workflowAction(action string) Tag {
return newPredefinedStringTag("wf-action", action)
}
// WorkflowListFilterType returns tag for WorkflowListFilterType
func workflowListFilterType(listFilterType string) Tag {
return newPredefinedStringTag("wf-list-filter-type", listFilterType)
}
// general
// WorkflowError returns tag for WorkflowError
func WorkflowError(error error) Tag { return newErrorTag("wf-error", error) }
// WorkflowTimeoutType returns tag for WorkflowTimeoutType
func WorkflowTimeoutType(timeoutType int64) Tag {
return newInt64("wf-timeout-type", timeoutType)
}
// WorkflowPollContextTimeout returns tag for WorkflowPollContextTimeout
func WorkflowPollContextTimeout(pollContextTimeout time.Duration) Tag {
return newDurationTag("wf-poll-context-timeout", pollContextTimeout)
}
// WorkflowHandlerName returns tag for WorkflowHandlerName
func WorkflowHandlerName(handlerName string) Tag {
return newStringTag("wf-handler-name", handlerName)
}
// WorkflowID returns tag for WorkflowID
func WorkflowID(workflowID string) Tag {
return newStringTag("wf-id", workflowID)
}
// WorkflowType returns tag for WorkflowType
func WorkflowType(wfType string) Tag {
return newStringTag("wf-type", wfType)
}
// WorkflowState returns tag for WorkflowState
func WorkflowState(s int) Tag {
return newInt("wf-state", s)
}
// WorkflowRunID returns tag for WorkflowRunID
func WorkflowRunID(runID string) Tag {
return newStringTag("wf-run-id", runID)
}
// WorkflowRunIDBytes returns tag for WorkflowRunID
func WorkflowRunIDBytes(runID []byte) Tag {
// Todo, we want these to print as hex-encoded (uuid format), binary tag gives us b64
return newBinaryTag("wf-run-id", runID)
}
// WorkflowResetBaseRunID returns tag for WorkflowResetBaseRunID
func WorkflowResetBaseRunID(runID string) Tag {
return newStringTag("wf-reset-base-run-id", runID)
}
// WorkflowResetNewRunID returns tag for WorkflowResetNewRunID
func WorkflowResetNewRunID(runID string) Tag {
return newStringTag("wf-reset-new-run-id", runID)
}
// WorkflowBinaryChecksum returns tag for WorkflowBinaryChecksum
func WorkflowBinaryChecksum(cs string) Tag {
return newStringTag("wf-binary-checksum", cs)
}
// WorkflowActivityID returns tag for WorkflowActivityID
func WorkflowActivityID(id string) Tag {
return newStringTag("wf-activity-id", id)
}
// WorkflowTimerID returns tag for WorkflowTimerID
func WorkflowTimerID(id string) Tag {
return newStringTag("wf-timer-id", id)
}
// WorkflowBeginningRunID returns tag for WorkflowBeginningRunID
func WorkflowBeginningRunID(beginningRunID string) Tag {
return newStringTag("wf-beginning-run-id", beginningRunID)
}
// WorkflowEndingRunID returns tag for WorkflowEndingRunID
func WorkflowEndingRunID(endingRunID string) Tag {
return newStringTag("wf-ending-run-id", endingRunID)
}
// WorkflowDecisionTimeoutSeconds returns tag for WorkflowDecisionTimeoutSeconds
func WorkflowDecisionTimeoutSeconds(s int32) Tag {
return newInt32("wf-decision-timeout", s)
}
// QueryID returns tag for QueryID
func QueryID(queryID string) Tag {
return newStringTag("query-id", queryID)
}
// BlobSizeViolationOperation returns tag for BlobSizeViolationOperation
func BlobSizeViolationOperation(operation string) Tag {
return newStringTag("blob-size-violation-operation", operation)
}
// namespace related
// WorkflowNamespaceID returns tag for WorkflowNamespaceID
func WorkflowNamespaceID(namespaceID string) Tag {
return newStringTag("wf-namespace-id", namespaceID)
}
// WorkflowNamespaceIDBytes returns tag for WorkflowNamespaceID
func WorkflowNamespaceIDBytes(namespaceID []byte) Tag {
// Todo, we want these to print as hex-encoded (uuid format), binary tag gives us b64
return newBinaryTag("wf-namespace-id", namespaceID)
}
// WorkflowNamespace returns tag for WorkflowNamespace
func WorkflowNamespace(namespace string) Tag {
return newStringTag("wf-namespace", namespace)
}
// WorkflowNamespaceIDs returns tag for WorkflowNamespaceIDs
func WorkflowNamespaceIDs(namespaceIDs interface{}) Tag {
return newObjectTag("wf-namespace-ids", namespaceIDs)
}
// history event ID related
// WorkflowEventID returns tag for WorkflowEventID
func WorkflowEventID(eventID int64) Tag {
return newInt64("wf-history-event-id", eventID)
}
// WorkflowScheduleID returns tag for WorkflowScheduleID
func WorkflowScheduleID(scheduleID int64) Tag {
return newInt64("wf-schedule-id", scheduleID)
}
// WorkflowStartedID returns tag for WorkflowStartedID
func WorkflowStartedID(id int64) Tag {
return newInt64("wf-started-id", id)
}
// WorkflowInitiatedID returns tag for WorkflowInitiatedID
func WorkflowInitiatedID(id int64) Tag {
return newInt64("wf-initiated-id", id)
}
// WorkflowFirstEventID returns tag for WorkflowFirstEventID
func WorkflowFirstEventID(firstEventID int64) Tag {
return newInt64("wf-first-event-id", firstEventID)
}
// WorkflowNextEventID returns tag for WorkflowNextEventID
func WorkflowNextEventID(nextEventID int64) Tag {
return newInt64("wf-next-event-id", nextEventID)
}
// WorkflowBeginningFirstEventID returns tag for WorkflowBeginningFirstEventID
func WorkflowBeginningFirstEventID(beginningFirstEventID int64) Tag {
return newInt64("wf-begining-first-event-id", beginningFirstEventID)
}
// WorkflowEndingNextEventID returns tag for WorkflowEndingNextEventID
func WorkflowEndingNextEventID(endingNextEventID int64) Tag {
return newInt64("wf-ending-next-event-id", endingNextEventID)
}
// WorkflowResetNextEventID returns tag for WorkflowResetNextEventID
func WorkflowResetNextEventID(resetNextEventID int64) Tag {
return newInt64("wf-reset-next-event-id", resetNextEventID)
}
// history tree
// WorkflowTreeID returns tag for WorkflowTreeID
func WorkflowTreeID(treeID string) Tag {
return newStringTag("wf-tree-id", treeID)
}
// WorkflowBranchID returns tag for WorkflowBranchID
func WorkflowBranchID(branchID string) Tag {
return newStringTag("wf-branch-id", branchID)
}
// WorkflowTreeIDBytes returns tag for WorkflowTreeIDBytes
func WorkflowTreeIDBytes(treeIDBytes []byte) Tag {
return newBinaryTag("wf-tree-id", treeIDBytes)
}
// WorkflowBranchIDBytes returns tag for WorkflowBranchIDBytes
func WorkflowBranchIDBytes(branchIDBytes []byte) Tag {
return newBinaryTag("wf-branch-id", branchIDBytes)
}
// workflow task
// WorkflowDecisionType returns tag for WorkflowDecisionType
func WorkflowDecisionType(decisionType int64) Tag {
return newInt64("wf-decision-type", decisionType)
}
// WorkflowQueryType returns tag for WorkflowQueryType
func WorkflowQueryType(qt string) Tag {
return newStringTag("wf-query-type", qt)
}
// WorkflowDecisionFailCause returns tag for WorkflowDecisionFailCause
func WorkflowDecisionFailCause(decisionFailCause int64) Tag {
return newInt64("wf-decision-fail-cause", decisionFailCause)
}
// WorkflowTaskListType returns tag for WorkflowTaskListType
func WorkflowTaskListType(taskListType tasklistpb.TaskListType) Tag {
return newInt32("wf-task-list-type", int32(taskListType))
}
// WorkflowTaskListName returns tag for WorkflowTaskListName
func WorkflowTaskListName(taskListName string) Tag {
return newStringTag("wf-task-list-name", taskListName)
}
// size limit
// WorkflowSize returns tag for WorkflowSize
func WorkflowSize(workflowSize int64) Tag {
return newInt64("wf-size", workflowSize)
}
// WorkflowSignalCount returns tag for SignalCount
func WorkflowSignalCount(signalCount int32) Tag {
return newInt32("wf-signal-count", signalCount)
}
// WorkflowHistorySize returns tag for HistorySize
func WorkflowHistorySize(historySize int) Tag {
return newInt("wf-history-size", historySize)
}
// WorkflowHistorySizeBytes returns tag for HistorySizeBytes
func WorkflowHistorySizeBytes(historySizeBytes int) Tag {
return newInt("wf-history-size-bytes", historySizeBytes)
}
// WorkflowEventCount returns tag for EventCount
func WorkflowEventCount(eventCount int) Tag {
return newInt("wf-event-count", eventCount)
}
/////////////////// System tags defined here: ///////////////////
// Tags with pre-define values
// Component returns tag for Component
func component(component string) Tag {
return newPredefinedStringTag("component", component)
}
// Lifecycle returns tag for Lifecycle
func lifecycle(lifecycle string) Tag {
return newPredefinedStringTag("lifecycle", lifecycle)
}
// StoreOperation returns tag for StoreOperation
func storeOperation(storeOperation string) Tag {
return newPredefinedStringTag("store-operation", storeOperation)
}
// OperationResult returns tag for OperationResult
func operationResult(operationResult string) Tag {
return newPredefinedStringTag("operation-result", operationResult)
}
// ErrorType returns tag for ErrorType
func errorType(errorType string) Tag {
return newPredefinedStringTag("error", errorType)
}
// Shardupdate returns tag for Shardupdate
func shardupdate(shardupdate string) Tag {
return newPredefinedStringTag("shard-update", shardupdate)
}
// general
// Service returns tag for Service
func Service(sv string) Tag {
return newStringTag("service", sv)
}
// Addresses returns tag for Addresses
func Addresses(ads []string) Tag {
return newObjectTag("addresses", ads)
}
// ListenerName returns tag for ListenerName
func ListenerName(name string) Tag {
return newStringTag("listener-name", name)
}
// Address return tag for Address
func Address(ad string) Tag {
return newStringTag("address", ad)
}
// HostID return tag for HostID
func HostID(hid string) Tag {
return newStringTag("hostId", hid)
}
// Key returns tag for Key
func Key(k string) Tag {
return newStringTag("key", k)
}
// Name returns tag for Name
func Name(k string) Tag {
return newStringTag("name", k)
}
// Value returns tag for Value
func Value(v interface{}) Tag {
return newObjectTag("value", v)
}
// ValueType returns tag for ValueType
func ValueType(v interface{}) Tag {
return newStringTag("value-type", fmt.Sprintf("%T", v))
}
// DefaultValue returns tag for DefaultValue
func DefaultValue(v interface{}) Tag {
return newObjectTag("default-value", v)
}
// IgnoredValue returns tag for IgnoredValue
func IgnoredValue(v interface{}) Tag {
return newObjectTag("ignored-value", v)
}
// Port returns tag for Port
func Port(p int) Tag {
return newInt("port", p)
}
// CursorTimestamp returns tag for CursorTimestamp
func CursorTimestamp(timestamp time.Time) Tag {
return newTimeTag("cursor-timestamp", timestamp)
}
// MetricScope returns tag for MetricScope
func MetricScope(metricScope int) Tag {
return newInt("metric-scope", metricScope)
}
// StoreType returns tag for StoreType
func StoreType(storeType string) Tag {
return newPredefinedStringTag("store-type", storeType)
}
// DetailInfo returns tag for DetailInfo
func DetailInfo(i string) Tag {
return newStringTag("detail-info", i)
}
// Counter returns tag for Counter
func Counter(c int) Tag {
return newInt("counter", c)
}
// Number returns tag for Number
func Number(n int64) Tag {
return newInt64("number", n)
}
// NextNumber returns tag for NextNumber
func NextNumber(n int64) Tag {
return newInt64("next-number", n)
}
// Bool returns tag for Bool
func Bool(b bool) Tag {
return newBoolTag("bool", b)
}
// history engine shard
// ShardID returns tag for ShardID
func ShardID(shardID int) Tag {
return newInt("shard-id", shardID)
}
// ShardTime returns tag for ShardTime
func ShardTime(shardTime interface{}) Tag {
return newObjectTag("shard-time", shardTime)
}
// ShardReplicationAck returns tag for ShardReplicationAck
func ShardReplicationAck(shardReplicationAck int64) Tag {
return newInt64("shard-replication-ack", shardReplicationAck)
}
// PreviousShardRangeID returns tag for PreviousShardRangeID
func PreviousShardRangeID(id int64) Tag {
return newInt64("previous-shard-range-id", id)
}
// ShardRangeID returns tag for ShardRangeID
func ShardRangeID(id int64) Tag {
return newInt64("shard-range-id", id)
}
// ReadLevel returns tag for ReadLevel
func ReadLevel(lv int64) Tag {
return newInt64("read-level", lv)
}
// MinLevel returns tag for MinLevel
func MinLevel(lv int64) Tag {
return newInt64("min-level", lv)
}
// MaxLevel returns tag for MaxLevel
func MaxLevel(lv int64) Tag {
return newInt64("max-level", lv)
}
// ShardTransferAcks returns tag for ShardTransferAcks
func ShardTransferAcks(shardTransferAcks interface{}) Tag {
return newObjectTag("shard-transfer-acks", shardTransferAcks)
}
// ShardTimerAcks returns tag for ShardTimerAcks
func ShardTimerAcks(shardTimerAcks interface{}) Tag {
return newObjectTag("shard-timer-acks", shardTimerAcks)
}
// task queue processor
// Task returns tag for Task
func Task(task interface{}) Tag {
return newObjectTag("queue-task", task)
}
// Tasks returns tag for Tasks
func Tasks(s interface{}) Tag {
return newObjectTag("tasks", s)
}
// TaskID returns tag for TaskID
func TaskID(taskID int64) Tag {
return newInt64("queue-task-id", taskID)
}
// TaskType returns tag for TaskType for queue processor
func TaskType(taskType int32) Tag {
return newInt32("queue-task-type", taskType)
}
// TaskVersion returns tag for TaskVersion
func TaskVersion(taskVersion int64) Tag {
return newInt64("queue-task-version", taskVersion)
}
// TaskVisibilityTimestamp returns tag for task visibilityTimestamp
func TaskVisibilityTimestamp(timestamp int64) Tag {
return newInt64("queue-task-visibility-timestamp", timestamp)
}
// NumberProcessed returns tag for NumberProcessed
func NumberProcessed(n int) Tag {
return newInt("number-processed", n)
}
// NumberDeleted returns tag for NumberDeleted
func NumberDeleted(n int) Tag {
return newInt("number-deleted", n)
}
// TimerTaskStatus returns tag for TimerTaskStatus
func TimerTaskStatus(timerTaskStatus int32) Tag {
return newInt32("timer-task-status", timerTaskStatus)
}
// retry
// Attempt returns tag for Attempt
func Attempt(attempt int32) Tag {
return newInt32("attempt", attempt)
}
// AttemptCount returns tag for AttemptCount
func AttemptCount(attemptCount int64) Tag {
return newInt64("attempt-count", attemptCount)
}
// AttemptStart returns tag for AttemptStart
func AttemptStart(attemptStart time.Time) Tag {
return newTimeTag("attempt-start", attemptStart)
}
// AttemptEnd returns tag for AttemptEnd
func AttemptEnd(attemptEnd time.Time) Tag {
return newTimeTag("attempt-end", attemptEnd)
}
// ScheduleAttempt returns tag for ScheduleAttempt
func ScheduleAttempt(scheduleAttempt int64) Tag {
return newInt64("schedule-attempt", scheduleAttempt)
}
// ElasticSearch
// ESRequest returns tag for ESRequest
func ESRequest(ESRequest string) Tag {
return newStringTag("es-request", ESRequest)
}
// ESResponseStatus returns tag for ESResponse status
func ESResponseStatus(status int) Tag {
return newInt("es-response-status", status)
}
// ESResponseError returns tag for ESResponse error
func ESResponseError(msg string) Tag {
return newStringTag("es-response-error", msg)
}
// ESKey returns tag for ESKey
func ESKey(ESKey string) Tag {
return newStringTag("es-mapping-key", ESKey)
}
// ESValue returns tag for ESValue
func ESValue(ESValue []byte) Tag {
// convert value to string type so that the value logged is human readable
return newStringTag("es-mapping-value", string(ESValue))
}
// ESConfig returns tag for ESConfig
func ESConfig(c interface{}) Tag {
return newObjectTag("es-config", c)
}
// ESField returns tag for ESField
func ESField(ESField string) Tag {
return newStringTag("es-field", ESField)
}
// ESDocID returns tag for ESDocID
func ESDocID(id string) Tag {
return newStringTag("es-doc-id", id)
}
// LoggingCallAtKey is a reserved tag
const LoggingCallAtKey = "logging-call-at"
// SysStackTrace returns tag for SysStackTrace
func SysStackTrace(stackTrace string) Tag {
return newStringTag("sys-stack-trace", stackTrace)
}
// Kafka related
// KafkaTopicName returns tag for TopicName
func KafkaTopicName(topicName string) Tag {
return newStringTag("kafka-topic-name", topicName)
}
// KafkaConsumerName returns tag for ConsumerName
func KafkaConsumerName(consumerName string) Tag {
return newStringTag("kafka-consumer-name", consumerName)
}
// KafkaPartition returns tag for Partition
func KafkaPartition(partition int32) Tag {
return newInt32("kafka-partition", partition)
}
// KafkaPartitionKey returns tag for PartitionKey
func KafkaPartitionKey(partitionKey interface{}) Tag {
return newObjectTag("kafka-partition-key", partitionKey)
}
// KafkaOffset returns tag for Offset
func KafkaOffset(offset int64) Tag {
return newInt64("kafka-offset", offset)
}
// TokenLastEventID returns tag for TokenLastEventID
func TokenLastEventID(id int64) Tag {
return newInt64("token-last-event-id", id)
}
/////////////////// XDC tags defined here: xdc- ///////////////////
// SourceCluster returns tag for SourceCluster
func SourceCluster(sourceCluster string) Tag {
return newStringTag("xdc-source-cluster", sourceCluster)
}
// PrevActiveCluster returns tag for PrevActiveCluster
func PrevActiveCluster(prevActiveCluster string) Tag {
return newStringTag("xdc-prev-active-cluster", prevActiveCluster)
}
// FailoverMsg returns tag for FailoverMsg
func FailoverMsg(failoverMsg string) Tag {
return newStringTag("xdc-failover-msg", failoverMsg)
}
// FailoverVersion returns tag for Version
func FailoverVersion(version int64) Tag {
return newInt64("xdc-failover-version", version)
}
// CurrentVersion returns tag for CurrentVersion
func CurrentVersion(currentVersion int64) Tag {
return newInt64("xdc-current-version", currentVersion)
}
// IncomingVersion returns tag for IncomingVersion
func IncomingVersion(incomingVersion int64) Tag {
return newInt64("xdc-incoming-version", incomingVersion)
}
// ReplicationInfo returns tag for ReplicationInfo
func ReplicationInfo(replicationInfo interface{}) Tag {
return newObjectTag("xdc-replication-info", replicationInfo)
}
// ReplicationState returns tag for ReplicationState
func ReplicationState(replicationState interface{}) Tag {
return newObjectTag("xdc-replication-state", replicationState)
}
// FirstEventVersion returns tag for FirstEventVersion
func FirstEventVersion(version int64) Tag {
return newInt64("xdc-first-event-version", version)
}
// LastEventVersion returns tag for LastEventVersion
func LastEventVersion(version int64) Tag {
return newInt64("xdc-last-event-version", version)
}
// TokenLastEventVersion returns tag for TokenLastEventVersion
func TokenLastEventVersion(version int64) Tag {
return newInt64("xdc-token-last-event-version", version)
}
/////////////////// Archival tags defined here: archival- ///////////////////
// archival request tags
// ArchivalCallerServiceName returns tag for the service name calling archival client
func ArchivalCallerServiceName(callerServiceName string) Tag {
return newStringTag("archival-caller-service-name", callerServiceName)
}
// ArchivalArchiveAttemptedInline returns tag for whether archival is attempted inline before signal is sent.
func ArchivalArchiveAttemptedInline(archiveInline bool) Tag {
return newBoolTag("archival-archive-attempted-inline", archiveInline)
}
// ArchivalRequestNamespaceID returns tag for RequestNamespaceID
func ArchivalRequestNamespaceID(requestNamespaceID string) Tag {
return newStringTag("archival-request-namespace-id", requestNamespaceID)
}
// ArchivalRequestNamespace returns tag for RequestNamespace
func ArchivalRequestNamespace(requestNamespace string) Tag {
return newStringTag("archival-request-namespace", requestNamespace)
}
// ArchivalRequestWorkflowID returns tag for RequestWorkflowID
func ArchivalRequestWorkflowID(requestWorkflowID string) Tag {
return newStringTag("archival-request-workflow-id", requestWorkflowID)
}
// ArchvialRequestWorkflowType returns tag for RequestWorkflowType
func ArchvialRequestWorkflowType(requestWorkflowType string) Tag {
return newStringTag("archival-request-workflow-type", requestWorkflowType)
}
// ArchivalRequestRunID returns tag for RequestRunID
func ArchivalRequestRunID(requestRunID string) Tag {
return newStringTag("archival-request-run-id", requestRunID)
}
// ArchivalRequestBranchToken returns tag for RequestBranchToken
func ArchivalRequestBranchToken(requestBranchToken []byte) Tag {
return newObjectTag("archival-request-branch-token", requestBranchToken)
}
// ArchivalRequestNextEventID returns tag for RequestNextEventID
func ArchivalRequestNextEventID(requestNextEventID int64) Tag {
return newInt64("archival-request-next-event-id", requestNextEventID)
}
// ArchivalRequestCloseFailoverVersion returns tag for RequestCloseFailoverVersion
func ArchivalRequestCloseFailoverVersion(requestCloseFailoverVersion int64) Tag {
return newInt64("archival-request-close-failover-version", requestCloseFailoverVersion)
}
// ArchivalRequestCloseTimestamp returns tag for RequestCloseTimestamp
func ArchivalRequestCloseTimestamp(requestCloseTimeStamp int64) Tag {
return newInt64("archival-request-close-timestamp", requestCloseTimeStamp)
}
// ArchivalRequestStatus returns tag for RequestStatus
func ArchivalRequestStatus(requestStatus string) Tag {
return newStringTag("archival-request-status", requestStatus)
}
// ArchivalURI returns tag for Archival URI
func ArchivalURI(URI string) Tag {
return newStringTag("archival-URI", URI)
}
// ArchivalArchiveFailReason returns tag for ArchivalArchiveFailReason
func ArchivalArchiveFailReason(archiveFailReason string) Tag {
return newStringTag("archival-archive-fail-reason", archiveFailReason)
}
// ArchivalDeleteHistoryFailReason returns tag for ArchivalDeleteHistoryFailReason
func ArchivalDeleteHistoryFailReason(deleteHistoryFailReason string) Tag {
return newStringTag("archival-delete-history-fail-reason", deleteHistoryFailReason)
}
// ArchivalVisibilityQuery returns tag for the query for getting archived visibility record
func ArchivalVisibilityQuery(query string) Tag {
return newStringTag("archival-visibility-query", query)
}
// The following logger tags are only used by internal archiver implementation.
// TODO: move them to internal repo once temporal plugin model is in place.
// ArchivalBlobKey returns tag for BlobKey
func ArchivalBlobKey(blobKey string) Tag {
return newStringTag("archival-blob-key", blobKey)
}
// ArchivalDeterministicConstructionCheckFailReason returns tag for ArchivalDeterministicConstructionCheckFailReason
func ArchivalDeterministicConstructionCheckFailReason(deterministicConstructionCheckFailReason string) Tag {
return newStringTag("archival-deterministic-construction-check-fail-reason", deterministicConstructionCheckFailReason)
}
// ArchivalNonDeterministicBlobKey returns tag for randomly generated NonDeterministicBlobKey
func ArchivalNonDeterministicBlobKey(nondeterministicBlobKey string) Tag {
return newStringTag("archival-non-deterministic-blob-key", nondeterministicBlobKey)
}
// ArchivalBlobIntegrityCheckFailReason returns tag for ArchivalBlobIntegrityCheckFailReason
func ArchivalBlobIntegrityCheckFailReason(blobIntegrityCheckFailReason string) Tag {
return newStringTag("archival-blob-integrity-check-fail-reason", blobIntegrityCheckFailReason)
}
// ArchivalBlobstoreContextTimeout returns tag for ArchivalBlobstoreContextTimeout
func ArchivalBlobstoreContextTimeout(blobstoreContextTimeout time.Duration) Tag {
return newDurationTag("archival-blobstore-context-timeout", blobstoreContextTimeout)
}
// TransportType returns tag for transportType
func TransportType(transportType string) Tag {
return newStringTag("transport-type", transportType)
}
// ActivityInfo returns tag for activity info
func ActivityInfo(activityInfo interface{}) Tag {
return newObjectTag("activity-info", activityInfo)
}
// DecisionRequestId returns tag for decision RequestId
func DecisionRequestId(s string) Tag {
return newStringTag("decision-request-id", s)
}
// AckLevel returns tag for ack level
func AckLevel(s interface{}) Tag {
return newObjectTag("ack-level", s)
}
// QueryLevel returns tag for query level
func QueryLevel(s time.Time) Tag {
return newTimeTag("query-level", s)
}
// TaskListInfo returns tag for task list info
func TaskListInfo(s interface{}) Tag {
return newObjectTag("task-list-info", s)
}
| 1 | 9,560 | Can you rename alias to `executiongenpb`. The rule is package+"pb" for `temporal-proto` and package+"genpb" for server protos. | temporalio-temporal | go |
@@ -261,7 +261,9 @@ func addNodeToResults(n *node, results *SublistResult) {
if i := findQSliceForSub(qr[0], results.qsubs); i >= 0 {
results.qsubs[i] = append(results.qsubs[i], qr...)
} else {
- results.qsubs = append(results.qsubs, qr)
+ copyqr := make([]*subscription, len(qr))
+ copy(copyqr, qr)
+ results.qsubs = append(results.qsubs, copyqr)
}
}
} | 1 | // Copyright 2016-2017 Apcera Inc. All rights reserved.
// The sublist is a routing mechanism to handle subject distribution
// and provides a facility to match subjects from published messages to
// interested subscribers. Subscribers can have wildcard subjects to match
// multiple published subjects.
package server
import (
"bytes"
"errors"
"strings"
"sync"
"sync/atomic"
)
// Common byte variables for wildcards and token separator.
const (
pwc = '*'
fwc = '>'
tsep = "."
btsep = '.'
)
// Sublist related errors
var (
ErrInvalidSubject = errors.New("sublist: Invalid Subject")
ErrNotFound = errors.New("sublist: No Matches Found")
)
// slCacheMax is used to bound the size of the frontend cache
const slCacheMax = 1024
// A result structure better optimized for queue subs.
type SublistResult struct {
psubs []*subscription
qsubs [][]*subscription // don't make this a map, too expensive to iterate
}
// A Sublist stores and efficiently retrieves subscriptions.
type Sublist struct {
sync.RWMutex
genid uint64
matches uint64
cacheHits uint64
inserts uint64
removes uint64
cache map[string]*SublistResult
root *level
count uint32
}
// A node contains subscriptions and a pointer to the next level.
type node struct {
next *level
psubs []*subscription
qsubs [][]*subscription
}
// A level represents a group of nodes and special pointers to
// wildcard nodes.
type level struct {
nodes map[string]*node
pwc, fwc *node
}
// Create a new default node.
func newNode() *node {
return &node{psubs: make([]*subscription, 0, 4)}
}
// Create a new default level. We use FNV1A as the hash
// algorithm for the tokens, which should be short.
func newLevel() *level {
return &level{nodes: make(map[string]*node)}
}
// New will create a default sublist
func NewSublist() *Sublist {
return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
}
// Insert adds a subscription into the sublist
func (s *Sublist) Insert(sub *subscription) error {
// copy the subject since we hold this and this might be part of a large byte slice.
subject := string(sub.subject)
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
s.Lock()
sfwc := false
l := s.root
var n *node
for _, t := range tokens {
lt := len(t)
if lt == 0 || sfwc {
s.Unlock()
return ErrInvalidSubject
}
if lt > 1 {
n = l.nodes[t]
} else {
switch t[0] {
case pwc:
n = l.pwc
case fwc:
n = l.fwc
sfwc = true
default:
n = l.nodes[t]
}
}
if n == nil {
n = newNode()
if lt > 1 {
l.nodes[t] = n
} else {
switch t[0] {
case pwc:
l.pwc = n
case fwc:
l.fwc = n
default:
l.nodes[t] = n
}
}
}
if n.next == nil {
n.next = newLevel()
}
l = n.next
}
if sub.queue == nil {
n.psubs = append(n.psubs, sub)
} else {
// This is a queue subscription
if i := findQSliceForSub(sub, n.qsubs); i >= 0 {
n.qsubs[i] = append(n.qsubs[i], sub)
} else {
n.qsubs = append(n.qsubs, []*subscription{sub})
}
}
s.count++
s.inserts++
s.addToCache(subject, sub)
atomic.AddUint64(&s.genid, 1)
s.Unlock()
return nil
}
// Deep copy
func copyResult(r *SublistResult) *SublistResult {
nr := &SublistResult{}
nr.psubs = append([]*subscription(nil), r.psubs...)
for _, qr := range r.qsubs {
nqr := append([]*subscription(nil), qr...)
nr.qsubs = append(nr.qsubs, nqr)
}
return nr
}
// addToCache will add the new entry to existing cache
// entries if needed. Assumes write lock is held.
func (s *Sublist) addToCache(subject string, sub *subscription) {
for k, r := range s.cache {
if matchLiteral(k, subject) {
// Copy since others may have a reference.
nr := copyResult(r)
if sub.queue == nil {
nr.psubs = append(nr.psubs, sub)
} else {
if i := findQSliceForSub(sub, nr.qsubs); i >= 0 {
nr.qsubs[i] = append(nr.qsubs[i], sub)
} else {
nr.qsubs = append(nr.qsubs, []*subscription{sub})
}
}
s.cache[k] = nr
}
}
}
// removeFromCache will remove the sub from any active cache entries.
// Assumes write lock is held.
func (s *Sublist) removeFromCache(subject string, sub *subscription) {
for k := range s.cache {
if !matchLiteral(k, subject) {
continue
}
// Since someone else may be referencing, can't modify the list
// safely, just let it re-populate.
delete(s.cache, k)
}
}
// Match will match all entries to the literal subject.
// It will return a set of results for both normal and queue subscribers.
func (s *Sublist) Match(subject string) *SublistResult {
s.RLock()
atomic.AddUint64(&s.matches, 1)
rc, ok := s.cache[subject]
s.RUnlock()
if ok {
atomic.AddUint64(&s.cacheHits, 1)
return rc
}
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
// FIXME(dlc) - Make shared pool between sublist and client readLoop?
result := &SublistResult{}
s.Lock()
matchLevel(s.root, tokens, result)
// Add to our cache
s.cache[subject] = result
// Bound the number of entries to sublistMaxCache
if len(s.cache) > slCacheMax {
for k := range s.cache {
delete(s.cache, k)
break
}
}
s.Unlock()
return result
}
// This will add in a node's results to the total results.
func addNodeToResults(n *node, results *SublistResult) {
results.psubs = append(results.psubs, n.psubs...)
for _, qr := range n.qsubs {
if len(qr) == 0 {
continue
}
// Need to find matching list in results
if i := findQSliceForSub(qr[0], results.qsubs); i >= 0 {
results.qsubs[i] = append(results.qsubs[i], qr...)
} else {
results.qsubs = append(results.qsubs, qr)
}
}
}
// We do not use a map here since we want iteration to be fast when
// processing publishes in L1 on client. So we need to walk sequentially
// for now. Keep an eye on this in case we start getting a large number of
// different queue subscribers for the same subject.
func findQSliceForSub(sub *subscription, qsl [][]*subscription) int {
if sub.queue == nil {
return -1
}
for i, qr := range qsl {
if len(qr) > 0 && bytes.Equal(sub.queue, qr[0].queue) {
return i
}
}
return -1
}
// matchLevel is used to recursively descend into the trie.
func matchLevel(l *level, toks []string, results *SublistResult) {
var pwc, n *node
for i, t := range toks {
if l == nil {
return
}
if l.fwc != nil {
addNodeToResults(l.fwc, results)
}
if pwc = l.pwc; pwc != nil {
matchLevel(pwc.next, toks[i+1:], results)
}
n = l.nodes[t]
if n != nil {
l = n.next
} else {
l = nil
}
}
if n != nil {
addNodeToResults(n, results)
}
if pwc != nil {
addNodeToResults(pwc, results)
}
}
// lnt is used to track descent into levels for a removal for pruning.
type lnt struct {
l *level
n *node
t string
}
// Remove will remove a subscription.
func (s *Sublist) Remove(sub *subscription) error {
subject := string(sub.subject)
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
s.Lock()
defer s.Unlock()
sfwc := false
l := s.root
var n *node
// Track levels for pruning
var lnts [32]lnt
levels := lnts[:0]
for _, t := range tokens {
lt := len(t)
if lt == 0 || sfwc {
return ErrInvalidSubject
}
if l == nil {
return ErrNotFound
}
if lt > 1 {
n = l.nodes[t]
} else {
switch t[0] {
case pwc:
n = l.pwc
case fwc:
n = l.fwc
sfwc = true
default:
n = l.nodes[t]
}
}
if n != nil {
levels = append(levels, lnt{l, n, t})
l = n.next
} else {
l = nil
}
}
if !s.removeFromNode(n, sub) {
return ErrNotFound
}
s.count--
s.removes++
for i := len(levels) - 1; i >= 0; i-- {
l, n, t := levels[i].l, levels[i].n, levels[i].t
if n.isEmpty() {
l.pruneNode(n, t)
}
}
s.removeFromCache(subject, sub)
atomic.AddUint64(&s.genid, 1)
return nil
}
// pruneNode is used to prune an empty node from the tree.
func (l *level) pruneNode(n *node, t string) {
if n == nil {
return
}
if n == l.fwc {
l.fwc = nil
} else if n == l.pwc {
l.pwc = nil
} else {
delete(l.nodes, t)
}
}
// isEmpty will test if the node has any entries. Used
// in pruning.
func (n *node) isEmpty() bool {
if len(n.psubs) == 0 && len(n.qsubs) == 0 {
if n.next == nil || n.next.numNodes() == 0 {
return true
}
}
return false
}
// Return the number of nodes for the given level.
func (l *level) numNodes() int {
num := len(l.nodes)
if l.pwc != nil {
num++
}
if l.fwc != nil {
num++
}
return num
}
// Removes a sub from a list.
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
for i := 0; i < len(sl); i++ {
if sl[i] == sub {
last := len(sl) - 1
sl[i] = sl[last]
sl[last] = nil
sl = sl[:last]
return shrinkAsNeeded(sl), true
}
}
return sl, false
}
// Remove the sub for the given node.
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
if n == nil {
return false
}
if sub.queue == nil {
n.psubs, found = removeSubFromList(sub, n.psubs)
return found
}
// We have a queue group subscription here
if i := findQSliceForSub(sub, n.qsubs); i >= 0 {
n.qsubs[i], found = removeSubFromList(sub, n.qsubs[i])
if len(n.qsubs[i]) == 0 {
last := len(n.qsubs) - 1
n.qsubs[i] = n.qsubs[last]
n.qsubs[last] = nil
n.qsubs = n.qsubs[:last]
if len(n.qsubs) == 0 {
n.qsubs = nil
}
}
return found
}
return false
}
// Checks if we need to do a resize. This handles the case of very large growth
// followed by a subsequent return to a more normal size after unsubscribes. For
// example, a slice that grew to a large capacity during a burst of subscriptions
// is copied into a right-sized slice once more than half of its capacity is unused.
func shrinkAsNeeded(sl []*subscription) []*subscription {
lsl := len(sl)
csl := cap(sl)
// Don't bother if list not too big
if csl <= 8 {
return sl
}
pFree := float32(csl-lsl) / float32(csl)
if pFree > 0.50 {
return append([]*subscription(nil), sl...)
}
return sl
}
// Count returns the number of subscriptions.
func (s *Sublist) Count() uint32 {
s.RLock()
defer s.RUnlock()
return s.count
}
// CacheCount returns the number of result sets in the cache.
func (s *Sublist) CacheCount() int {
s.RLock()
defer s.RUnlock()
return len(s.cache)
}
// Public stats for the sublist
type SublistStats struct {
NumSubs uint32 `json:"num_subscriptions"`
NumCache uint32 `json:"num_cache"`
NumInserts uint64 `json:"num_inserts"`
NumRemoves uint64 `json:"num_removes"`
NumMatches uint64 `json:"num_matches"`
CacheHitRate float64 `json:"cache_hit_rate"`
MaxFanout uint32 `json:"max_fanout"`
AvgFanout float64 `json:"avg_fanout"`
}
// Stats will return a stats structure for the current state.
func (s *Sublist) Stats() *SublistStats {
s.Lock()
defer s.Unlock()
st := &SublistStats{}
st.NumSubs = s.count
st.NumCache = uint32(len(s.cache))
st.NumInserts = s.inserts
st.NumRemoves = s.removes
st.NumMatches = atomic.LoadUint64(&s.matches)
if st.NumMatches > 0 {
st.CacheHitRate = float64(atomic.LoadUint64(&s.cacheHits)) / float64(st.NumMatches)
}
// whip through cache for fanout stats
tot, max := 0, 0
for _, r := range s.cache {
l := len(r.psubs) + len(r.qsubs)
tot += l
if l > max {
max = l
}
}
st.MaxFanout = uint32(max)
if tot > 0 {
st.AvgFanout = float64(tot) / float64(len(s.cache))
}
return st
}
// numLevels will return the maximum number of levels
// contained in the Sublist tree.
func (s *Sublist) numLevels() int {
return visitLevel(s.root, 0)
}
// visitLevel is used to descend the Sublist tree structure
// recursively.
func visitLevel(l *level, depth int) int {
if l == nil || l.numNodes() == 0 {
return depth
}
depth++
maxDepth := depth
for _, n := range l.nodes {
if n == nil {
continue
}
newDepth := visitLevel(n.next, depth)
if newDepth > maxDepth {
maxDepth = newDepth
}
}
if l.pwc != nil {
pwcDepth := visitLevel(l.pwc.next, depth)
if pwcDepth > maxDepth {
maxDepth = pwcDepth
}
}
if l.fwc != nil {
fwcDepth := visitLevel(l.fwc.next, depth)
if fwcDepth > maxDepth {
maxDepth = fwcDepth
}
}
return maxDepth
}
// IsValidSubject returns true if a subject is valid, false otherwise
func IsValidSubject(subject string) bool {
if subject == "" {
return false
}
sfwc := false
tokens := strings.Split(subject, tsep)
for _, t := range tokens {
if len(t) == 0 || sfwc {
return false
}
if len(t) > 1 {
continue
}
switch t[0] {
case fwc:
sfwc = true
}
}
return true
}
// IsValidLiteralSubject returns true if a subject is valid and literal (no wildcards), false otherwise
func IsValidLiteralSubject(subject string) bool {
tokens := strings.Split(subject, tsep)
for _, t := range tokens {
if len(t) == 0 {
return false
}
if len(t) > 1 {
continue
}
switch t[0] {
case pwc, fwc:
return false
}
}
return true
}
// matchLiteral is used to test literal subjects, those that do not have any
// wildcards, with a target subject. This is used in the cache layer.
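// For example (illustrative):
//
//	matchLiteral("foo.bar", "foo.*")     // true: the literal matches the wildcard subject
//	matchLiteral("foo.bar.baz", "foo.>") // true: '>' matches the remaining tokens
//	matchLiteral("foo", "foo.*")         // false: the literal has fewer tokens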
func matchLiteral(literal, subject string) bool {
li := 0
ll := len(literal)
ls := len(subject)
for i := 0; i < ls; i++ {
if li >= ll {
return false
}
// This function has been optimized for speed.
// For instance, do not set b := subject[i] here, since
// we may bump `i` in this loop to avoid a `continue` or
// to skip the common test in a particular case.
// Run Benchmark_SublistMatchLiteral before making any change.
switch subject[i] {
case pwc:
// NOTE: This is not testing validity of a subject, instead ensures
// that wildcards are treated as such if they follow some basic rules,
// namely that they are a token on their own.
if i == 0 || subject[i-1] == btsep {
if i == ls-1 {
// There is no more token in the subject after this wildcard.
// Skip token in literal and expect to not find a separator.
for {
// End of literal, this is a match.
if li >= ll {
return true
}
// Presence of separator, this can't be a match.
if literal[li] == btsep {
return false
}
li++
}
} else if subject[i+1] == btsep {
// There is another token in the subject after this wildcard.
// Skip token in literal and expect to get a separator.
for {
// We found the end of the literal before finding a separator,
// this can't be a match.
if li >= ll {
return false
}
if literal[li] == btsep {
break
}
li++
}
// Bump `i` since we know there is a `.` following, we are
// safe. The common test below is going to check `.` with `.`
// which is good. A `continue` here is too costly.
i++
}
}
case fwc:
// For `>` to be a wildcard, it means being the only or last character
// in the string preceded by a `.`
if (i == 0 || subject[i-1] == btsep) && i == ls-1 {
return true
}
}
if subject[i] != literal[li] {
return false
}
li++
}
	// Make sure we have processed all of the literal's chars.
return li >= ll
}
| 1 | 7,495 | Any noticeable performance hit? Any other way to avoid the allocation and copy even of the array? | nats-io-nats-server | go |
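To make the wildcard rules above concrete, here is a hedged usage sketch of the helpers shown in this file dump; it assumes the snippet's constants ('*' for pwc, '>' for fwc, '.' as the token separator) and that it sits in the same package as the sublist code, since matchLiteral is unexported.

package server // assumption: same package as the sublist code above

import "fmt"

// exampleSubjectMatching illustrates the helpers shown above; the expected
// results in the comments follow from the implementations in this file.
func exampleSubjectMatching() {
	fmt.Println(IsValidSubject("foo.*.bar"))          // true: '*' is a token of its own
	fmt.Println(IsValidSubject("foo.>.bar"))          // false: nothing may follow '>'
	fmt.Println(IsValidLiteralSubject("foo.*.bar"))   // false: literal subjects have no wildcards
	fmt.Println(matchLiteral("foo.bar", "foo.*"))     // true: '*' matches exactly one token
	fmt.Println(matchLiteral("foo.bar.baz", "foo.>")) // true: '>' matches all remaining tokens
	fmt.Println(matchLiteral("foo.bar", "foo.baz"))   // false: plain character mismatch
}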
@@ -234,8 +234,8 @@ func (i2c I2C) readByte() byte {
func (i2c I2C) readLastByte() byte {
for i2c.Bus.EVENTS_RXDREADY == 0 {
}
- i2c.Bus.EVENTS_RXDREADY = 0
i2c.signalStop() // signal 'stop' now, so it is sent when reading RXD
+ i2c.Bus.EVENTS_RXDREADY = 0
return byte(i2c.Bus.RXD)
}
| 1 | // +build nrf
package machine
import (
"device/arm"
"device/nrf"
)
type GPIOMode uint8
const (
GPIO_INPUT = (nrf.GPIO_PIN_CNF_DIR_Input << nrf.GPIO_PIN_CNF_DIR_Pos) | (nrf.GPIO_PIN_CNF_INPUT_Connect << nrf.GPIO_PIN_CNF_INPUT_Pos)
GPIO_INPUT_PULLUP = GPIO_INPUT | (nrf.GPIO_PIN_CNF_PULL_Pullup << nrf.GPIO_PIN_CNF_PULL_Pos)
GPIO_INPUT_PULLDOWN = GPIO_INPUT | (nrf.GPIO_PIN_CNF_PULL_Pulldown << nrf.GPIO_PIN_CNF_PULL_Pos)
GPIO_OUTPUT = (nrf.GPIO_PIN_CNF_DIR_Output << nrf.GPIO_PIN_CNF_DIR_Pos) | (nrf.GPIO_PIN_CNF_INPUT_Disconnect << nrf.GPIO_PIN_CNF_INPUT_Pos)
)
// Configure this pin with the given configuration.
func (p GPIO) Configure(config GPIOConfig) {
cfg := config.Mode | nrf.GPIO_PIN_CNF_DRIVE_S0S1 | nrf.GPIO_PIN_CNF_SENSE_Disabled
port, pin := p.getPortPin()
port.PIN_CNF[pin] = nrf.RegValue(cfg)
}
// Set the pin to high or low.
// Warning: only use this on an output pin!
func (p GPIO) Set(high bool) {
port, pin := p.getPortPin()
if high {
port.OUTSET = 1 << pin
} else {
port.OUTCLR = 1 << pin
}
}
// Return the register and mask to enable a given GPIO pin. This can be used to
// implement bit-banged drivers.
func (p GPIO) PortMaskSet() (*uint32, uint32) {
port, pin := p.getPortPin()
return (*uint32)(&port.OUTSET), 1 << pin
}
// Return the register and mask to disable a given port. This can be used to
// implement bit-banged drivers.
func (p GPIO) PortMaskClear() (*uint32, uint32) {
port, pin := p.getPortPin()
return (*uint32)(&port.OUTCLR), 1 << pin
}
// Get returns the current value of a GPIO pin.
func (p GPIO) Get() bool {
port, pin := p.getPortPin()
return (port.IN>>pin)&1 != 0
}
// UART on the NRF.
type UART struct {
Buffer *RingBuffer
}
// UART
var (
// UART0 is the hardware serial port on the NRF.
UART0 = UART{Buffer: NewRingBuffer()}
)
// Configure the UART.
func (uart UART) Configure(config UARTConfig) {
// Default baud rate to 115200.
if config.BaudRate == 0 {
config.BaudRate = 115200
}
uart.SetBaudRate(config.BaudRate)
// Set TX and RX pins from board.
uart.setPins(UART_TX_PIN, UART_RX_PIN)
nrf.UART0.ENABLE = nrf.UART_ENABLE_ENABLE_Enabled
nrf.UART0.TASKS_STARTTX = 1
nrf.UART0.TASKS_STARTRX = 1
nrf.UART0.INTENSET = nrf.UART_INTENSET_RXDRDY_Msk
// Enable RX IRQ.
arm.SetPriority(nrf.IRQ_UART0, 0xc0) // low priority
arm.EnableIRQ(nrf.IRQ_UART0)
}
// SetBaudRate sets the communication speed for the UART.
func (uart UART) SetBaudRate(br uint32) {
// Magic: calculate 'baudrate' register from the input number.
// Every value listed in the datasheet will be converted to the
	// correct register value, except for 921600. I suspect the value
// listed in the nrf52 datasheet (0x0EBED000) is incorrectly rounded
// and should be 0x0EBEE000, as the nrf51 datasheet lists the
// nonrounded value 0x0EBEDFA4.
// Some background:
// https://devzone.nordicsemi.com/f/nordic-q-a/391/uart-baudrate-register-values/2046#2046
rate := uint32((uint64(br/400)*uint64(400*0xffffffff/16000000) + 0x800) & 0xffffff000)
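	// Worked example of the formula above: for br = 115200, br/400 = 288 and
	// 400*0xffffffff/16000000 = 107374, so 288*107374 + 0x800 = 30925760;
	// masking with 0xffffff000 gives 0x01D7E000, the datasheet value for
	// 115200 baud.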
nrf.UART0.BAUDRATE = nrf.RegValue(rate)
}
// WriteByte writes a byte of data to the UART.
func (uart UART) WriteByte(c byte) error {
nrf.UART0.EVENTS_TXDRDY = 0
nrf.UART0.TXD = nrf.RegValue(c)
for nrf.UART0.EVENTS_TXDRDY == 0 {
}
return nil
}
func (uart UART) handleInterrupt() {
if nrf.UART0.EVENTS_RXDRDY != 0 {
uart.Receive(byte(nrf.UART0.RXD))
nrf.UART0.EVENTS_RXDRDY = 0x0
}
}
// I2C on the NRF.
type I2C struct {
Bus *nrf.TWI_Type
}
// There are 2 I2C interfaces on the NRF.
var (
I2C0 = I2C{Bus: nrf.TWI0}
I2C1 = I2C{Bus: nrf.TWI1}
)
// I2CConfig is used to store config info for I2C.
type I2CConfig struct {
Frequency uint32
SCL uint8
SDA uint8
}
// Configure is intended to setup the I2C interface.
func (i2c I2C) Configure(config I2CConfig) {
// Default I2C bus speed is 100 kHz.
if config.Frequency == 0 {
config.Frequency = TWI_FREQ_100KHZ
}
// Default I2C pins if not set.
if config.SDA == 0 && config.SCL == 0 {
config.SDA = SDA_PIN
config.SCL = SCL_PIN
}
// do config
sclPort, sclPin := GPIO{config.SCL}.getPortPin()
sclPort.PIN_CNF[sclPin] = (nrf.GPIO_PIN_CNF_DIR_Input << nrf.GPIO_PIN_CNF_DIR_Pos) |
(nrf.GPIO_PIN_CNF_INPUT_Connect << nrf.GPIO_PIN_CNF_INPUT_Pos) |
(nrf.GPIO_PIN_CNF_PULL_Pullup << nrf.GPIO_PIN_CNF_PULL_Pos) |
(nrf.GPIO_PIN_CNF_DRIVE_S0D1 << nrf.GPIO_PIN_CNF_DRIVE_Pos) |
(nrf.GPIO_PIN_CNF_SENSE_Disabled << nrf.GPIO_PIN_CNF_SENSE_Pos)
sdaPort, sdaPin := GPIO{config.SDA}.getPortPin()
sdaPort.PIN_CNF[sdaPin] = (nrf.GPIO_PIN_CNF_DIR_Input << nrf.GPIO_PIN_CNF_DIR_Pos) |
(nrf.GPIO_PIN_CNF_INPUT_Connect << nrf.GPIO_PIN_CNF_INPUT_Pos) |
(nrf.GPIO_PIN_CNF_PULL_Pullup << nrf.GPIO_PIN_CNF_PULL_Pos) |
(nrf.GPIO_PIN_CNF_DRIVE_S0D1 << nrf.GPIO_PIN_CNF_DRIVE_Pos) |
(nrf.GPIO_PIN_CNF_SENSE_Disabled << nrf.GPIO_PIN_CNF_SENSE_Pos)
if config.Frequency == TWI_FREQ_400KHZ {
i2c.Bus.FREQUENCY = nrf.TWI_FREQUENCY_FREQUENCY_K400
} else {
i2c.Bus.FREQUENCY = nrf.TWI_FREQUENCY_FREQUENCY_K100
}
i2c.Bus.ENABLE = nrf.TWI_ENABLE_ENABLE_Enabled
i2c.setPins(config.SCL, config.SDA)
}
// Tx does a single I2C transaction at the specified address.
// It clocks out the given address, writes the bytes in w, reads back len(r)
// bytes and stores them in r, and generates a stop condition on the bus.
func (i2c I2C) Tx(addr uint16, w, r []byte) error {
i2c.Bus.ADDRESS = nrf.RegValue(addr)
if len(w) != 0 {
i2c.Bus.TASKS_STARTTX = 1 // start transmission for writing
for _, b := range w {
i2c.writeByte(b)
}
}
if len(r) != 0 {
i2c.Bus.TASKS_STARTRX = 1 // re-start transmission for reading
for i := range r { // read each char
if i+1 == len(r) {
// The 'stop' signal must be sent before reading back the last
// byte, so that it will be sent by the I2C peripheral right
// after the last byte has been read.
r[i] = i2c.readLastByte()
} else {
r[i] = i2c.readByte()
}
}
} else {
// Nothing to read back. Stop the transmission.
i2c.signalStop()
}
return nil
}
// signalStop sends a stop signal when writing or tells the I2C peripheral that
// it must generate a stop condition after the next character is retrieved when
// reading.
func (i2c I2C) signalStop() {
i2c.Bus.TASKS_STOP = 1
for i2c.Bus.EVENTS_STOPPED == 0 {
}
i2c.Bus.EVENTS_STOPPED = 0
}
// writeByte writes a single byte to the I2C bus.
func (i2c I2C) writeByte(data byte) {
i2c.Bus.TXD = nrf.RegValue(data)
for i2c.Bus.EVENTS_TXDSENT == 0 {
}
i2c.Bus.EVENTS_TXDSENT = 0
}
// readByte reads a single byte from the I2C bus.
func (i2c I2C) readByte() byte {
for i2c.Bus.EVENTS_RXDREADY == 0 {
}
i2c.Bus.EVENTS_RXDREADY = 0
return byte(i2c.Bus.RXD)
}
// readLastByte reads a single byte from the I2C bus, sending a stop signal
// after it has been read.
func (i2c I2C) readLastByte() byte {
for i2c.Bus.EVENTS_RXDREADY == 0 {
}
i2c.Bus.EVENTS_RXDREADY = 0
i2c.signalStop() // signal 'stop' now, so it is sent when reading RXD
return byte(i2c.Bus.RXD)
}
// SPI on the NRF.
type SPI struct {
Bus *nrf.SPI_Type
}
// There are 2 SPI interfaces on the NRF5x.
var (
SPI0 = SPI{Bus: nrf.SPI0}
SPI1 = SPI{Bus: nrf.SPI1}
)
// SPIConfig is used to store config info for SPI.
type SPIConfig struct {
Frequency uint32
SCK uint8
MOSI uint8
MISO uint8
LSBFirst bool
Mode uint8
}
// Configure is intended to setup the SPI interface.
func (spi SPI) Configure(config SPIConfig) {
// Disable bus to configure it
spi.Bus.ENABLE = nrf.SPI_ENABLE_ENABLE_Disabled
// set frequency
var freq uint32
switch config.Frequency {
case 125000:
freq = nrf.SPI_FREQUENCY_FREQUENCY_K125
case 250000:
freq = nrf.SPI_FREQUENCY_FREQUENCY_K250
case 500000:
freq = nrf.SPI_FREQUENCY_FREQUENCY_K500
case 1000000:
freq = nrf.SPI_FREQUENCY_FREQUENCY_M1
case 2000000:
freq = nrf.SPI_FREQUENCY_FREQUENCY_M2
case 4000000:
freq = nrf.SPI_FREQUENCY_FREQUENCY_M4
case 8000000:
freq = nrf.SPI_FREQUENCY_FREQUENCY_M8
default:
freq = nrf.SPI_FREQUENCY_FREQUENCY_K500
}
spi.Bus.FREQUENCY = nrf.RegValue(freq)
var conf uint32
// set bit transfer order
if config.LSBFirst {
conf = (nrf.SPI_CONFIG_ORDER_LsbFirst << nrf.SPI_CONFIG_ORDER_Pos)
}
// set mode
switch config.Mode {
case 0:
conf &^= (nrf.SPI_CONFIG_CPOL_ActiveHigh << nrf.SPI_CONFIG_CPOL_Pos)
conf &^= (nrf.SPI_CONFIG_CPHA_Leading << nrf.SPI_CONFIG_CPHA_Pos)
case 1:
conf &^= (nrf.SPI_CONFIG_CPOL_ActiveHigh << nrf.SPI_CONFIG_CPOL_Pos)
conf |= (nrf.SPI_CONFIG_CPHA_Trailing << nrf.SPI_CONFIG_CPHA_Pos)
case 2:
conf |= (nrf.SPI_CONFIG_CPOL_ActiveLow << nrf.SPI_CONFIG_CPOL_Pos)
conf &^= (nrf.SPI_CONFIG_CPHA_Leading << nrf.SPI_CONFIG_CPHA_Pos)
case 3:
conf |= (nrf.SPI_CONFIG_CPOL_ActiveLow << nrf.SPI_CONFIG_CPOL_Pos)
conf |= (nrf.SPI_CONFIG_CPHA_Trailing << nrf.SPI_CONFIG_CPHA_Pos)
	default: // default to mode 0
conf &^= (nrf.SPI_CONFIG_CPOL_ActiveHigh << nrf.SPI_CONFIG_CPOL_Pos)
conf &^= (nrf.SPI_CONFIG_CPHA_Leading << nrf.SPI_CONFIG_CPHA_Pos)
}
spi.Bus.CONFIG = nrf.RegValue(conf)
// set pins
spi.setPins(config.SCK, config.MOSI, config.MISO)
// Re-enable bus now that it is configured.
spi.Bus.ENABLE = nrf.SPI_ENABLE_ENABLE_Enabled
}
// Transfer writes/reads a single byte using the SPI interface.
func (spi SPI) Transfer(w byte) (byte, error) {
spi.Bus.TXD = nrf.RegValue(w)
for spi.Bus.EVENTS_READY == 0 {
}
r := spi.Bus.RXD
spi.Bus.EVENTS_READY = 0
// TODO: handle SPI errors
return byte(r), nil
}
| 1 | 7,052 | What if you remove this line entirely from here? | tinygo-org-tinygo | go |
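The question above is about the `i2c.Bus.EVENTS_RXDREADY = 0` clear that the patch moves below `signalStop()`. A minimal sketch of the variant the reviewer floats — dropping the clear entirely — would look like this (hypothetical, not the merged code; whether the event can safely stay set between reads would need checking on hardware):

// readLastByte without the event clear, as the reviewer suggests trying.
func (i2c I2C) readLastByte() byte {
	for i2c.Bus.EVENTS_RXDREADY == 0 {
	}
	i2c.signalStop() // signal 'stop' now, so it is sent when reading RXD
	return byte(i2c.Bus.RXD)
}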
@@ -124,11 +124,13 @@ class PAFPN(FPN):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
- if self.extra_convs_on_inputs:
+ if self.add_extra_convs == 'on_input':
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[used_backbone_levels](orig))
- else:
+ elif self.add_extra_convs == 'on_output':
outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
+ else:
+ raise NotImplementedError
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1]))) | 1 | import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import auto_fp16
from ..builder import NECKS
from .fpn import FPN
@NECKS.register_module()
class PAFPN(FPN):
"""Path Aggregation Network for Instance Segmentation.
This is an implementation of the `PAFPN in Path Aggregation Network
<https://arxiv.org/abs/1803.01534>`_.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
add_extra_convs (bool): Whether to add conv layers on top of the
original feature maps. Default: False.
extra_convs_on_inputs (bool): Whether to apply extra conv on
            the original feature from the backbone. Default: True.
relu_before_extra_convs (bool): Whether to apply relu before the extra
conv. Default: False.
no_norm_on_lateral (bool): Whether to apply norm on lateral.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (str): Config dict for activation layer in ConvModule.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=None):
super(PAFPN,
self).__init__(in_channels, out_channels, num_outs, start_level,
end_level, add_extra_convs, extra_convs_on_inputs,
relu_before_extra_convs, no_norm_on_lateral,
conv_cfg, norm_cfg, act_cfg)
# add extra bottom up pathway
self.downsample_convs = nn.ModuleList()
self.pafpn_convs = nn.ModuleList()
for i in range(self.start_level + 1, self.backbone_end_level):
d_conv = ConvModule(
out_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
pafpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.downsample_convs.append(d_conv)
self.pafpn_convs.append(pafpn_conv)
@auto_fp16()
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] += F.interpolate(
laterals[i], size=prev_shape, mode='nearest')
# build outputs
# part 1: from original levels
inter_outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add bottom-up path
for i in range(0, used_backbone_levels - 1):
inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i])
outs = []
outs.append(inter_outs[0])
outs.extend([
self.pafpn_convs[i - 1](inter_outs[i])
for i in range(1, used_backbone_levels)
])
# part 3: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.extra_convs_on_inputs:
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[used_backbone_levels](orig))
else:
outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
| 1 | 21,929 | `on_lateral` can also be implemented | open-mmlab-mmdetection | py |
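The remark above refers to the third `add_extra_convs` mode that FPN supports. A hedged sketch of how an 'on_lateral' branch could slot into the block patched above (hypothetical, modeled on FPN's handling; not part of the patch shown):

                # Hypothetical extension covering all three extra-conv sources.
                if self.add_extra_convs == 'on_input':
                    extra_source = inputs[self.backbone_end_level - 1]
                elif self.add_extra_convs == 'on_lateral':
                    extra_source = laterals[-1]
                elif self.add_extra_convs == 'on_output':
                    extra_source = outs[-1]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))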
@@ -67,11 +67,14 @@ public class ItunesAdapter extends ArrayAdapter<PodcastSearchResult> {
//Set the title
viewHolder.titleView.setText(podcast.title);
- if(podcast.feedUrl != null && !podcast.feedUrl.contains("itunes.apple.com")) {
- viewHolder.urlView.setText(podcast.feedUrl);
- viewHolder.urlView.setVisibility(View.VISIBLE);
+ if(podcast.author != null && ! podcast.author.trim().isEmpty()) {
+ viewHolder.authorView.setText(podcast.author);
+ viewHolder.authorView.setVisibility(View.VISIBLE);
+ } else if(podcast.feedUrl != null && !podcast.feedUrl.contains("itunes.apple.com")) {
+ viewHolder.authorView.setText(podcast.feedUrl);
+ viewHolder.authorView.setVisibility(View.VISIBLE);
} else {
- viewHolder.urlView.setVisibility(View.GONE);
+ viewHolder.authorView.setVisibility(View.GONE);
}
//Update the empty imageView with the image from the feed | 1 | package de.danoeh.antennapod.adapter.itunes;
import android.content.Context;
import androidx.annotation.NonNull;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ArrayAdapter;
import android.widget.ImageView;
import android.widget.TextView;
import com.bumptech.glide.Glide;
import com.bumptech.glide.load.engine.DiskCacheStrategy;
import com.bumptech.glide.request.RequestOptions;
import de.danoeh.antennapod.discovery.PodcastSearchResult;
import java.util.List;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.activity.MainActivity;
public class ItunesAdapter extends ArrayAdapter<PodcastSearchResult> {
/**
* Related Context
*/
private final Context context;
/**
* List holding the podcasts found in the search
*/
private final List<PodcastSearchResult> data;
/**
* Constructor.
*
* @param context Related context
* @param objects Search result
*/
public ItunesAdapter(Context context, List<PodcastSearchResult> objects) {
super(context, 0, objects);
this.data = objects;
this.context = context;
}
@NonNull
@Override
public View getView(int position, View convertView, @NonNull ViewGroup parent) {
//Current podcast
PodcastSearchResult podcast = data.get(position);
//ViewHolder
PodcastViewHolder viewHolder;
//Resulting view
View view;
//Handle view holder stuff
if(convertView == null) {
view = ((MainActivity) context).getLayoutInflater()
.inflate(R.layout.itunes_podcast_listitem, parent, false);
viewHolder = new PodcastViewHolder(view);
view.setTag(viewHolder);
} else {
view = convertView;
viewHolder = (PodcastViewHolder) view.getTag();
}
//Set the title
viewHolder.titleView.setText(podcast.title);
if(podcast.feedUrl != null && !podcast.feedUrl.contains("itunes.apple.com")) {
viewHolder.urlView.setText(podcast.feedUrl);
viewHolder.urlView.setVisibility(View.VISIBLE);
} else {
viewHolder.urlView.setVisibility(View.GONE);
}
//Update the empty imageView with the image from the feed
Glide.with(context)
.load(podcast.imageUrl)
.apply(new RequestOptions()
.placeholder(R.color.light_gray)
.diskCacheStrategy(DiskCacheStrategy.NONE)
.fitCenter()
.dontAnimate())
.into(viewHolder.coverView);
//Feed the grid view
return view;
}
/**
* View holder object for the GridView
*/
static class PodcastViewHolder {
/**
* ImageView holding the Podcast image
*/
final ImageView coverView;
/**
* TextView holding the Podcast title
*/
final TextView titleView;
final TextView urlView;
/**
* Constructor
* @param view GridView cell
*/
PodcastViewHolder(View view){
coverView = view.findViewById(R.id.imgvCover);
titleView = view.findViewById(R.id.txtvTitle);
urlView = view.findViewById(R.id.txtvUrl);
}
}
}
| 1 | 15,658 | Please add a space between `if` and `(`. | AntennaPod-AntennaPod | java |
@@ -30,6 +30,12 @@ import (
const (
PoolOperator = "zpool"
StatusNoPoolsAvailable = "no pools available"
+ ZpoolStatusDegraded = "DEGRADED"
+ ZpoolStatusFaulted = "FAULTED"
+ ZpoolStatusOffline = "OFFLINE"
+ ZpoolStatusOnline = "ONLINE"
+ ZpoolStatusRemoved = "REMOVED"
+ ZpoolStatusUnavail = "UNAVAIL"
)
//PoolAddEventHandled is a flag representing if the pool has been initially imported or created | 1 | /*
Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pool
import (
"fmt"
"strings"
"time"
"github.com/golang/glog"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/util"
)
// PoolOperator is the name of the tool that makes pool-related operations.
const (
PoolOperator = "zpool"
StatusNoPoolsAvailable = "no pools available"
)
//PoolAddEventHandled is a flag representing if the pool has been initially imported or created
var PoolAddEventHandled = false
// PoolNamePrefix is a typed string to store pool name prefix
type PoolNamePrefix string
// PoolPrefix is prefix for pool name
const (
PoolPrefix PoolNamePrefix = "cstor-"
)
// RunnerVar the runner variable for executing binaries.
var RunnerVar util.Runner
// ImportPool imports cStor pool if already present.
func ImportPool(cStorPool *apis.CStorPool, cachefileFlag bool) error {
importAttr := importPoolBuilder(cStorPool, cachefileFlag)
stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, importAttr...)
if err != nil {
glog.Errorf("Unable to import pool: %v, %v", err.Error(), string(stdoutStderr))
return err
}
glog.Info("Importing Pool Successful")
return nil
}
// importPoolBuilder is to build pool import command.
func importPoolBuilder(cStorPool *apis.CStorPool, cachefileFlag bool) []string {
// populate pool import attributes.
var importAttr []string
importAttr = append(importAttr, "import")
if cStorPool.Spec.PoolSpec.CacheFile != "" && cachefileFlag {
importAttr = append(importAttr, "-c", cStorPool.Spec.PoolSpec.CacheFile,
"-o", cStorPool.Spec.PoolSpec.CacheFile)
}
importAttr = append(importAttr, string(PoolPrefix)+string(cStorPool.ObjectMeta.UID))
return importAttr
}
// CreatePool creates a new cStor pool.
func CreatePool(cStorPool *apis.CStorPool) error {
createAttr := createPoolBuilder(cStorPool)
glog.V(4).Info("createAttr : ", createAttr)
stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, createAttr...)
if err != nil {
glog.Errorf("Unable to create pool: %v", string(stdoutStderr))
return err
}
return nil
}
// createPoolBuilder is to build create pool command.
func createPoolBuilder(cStorPool *apis.CStorPool) []string {
// populate pool creation attributes.
var createAttr []string
// When disks of other file formats, say ext4, are used to create cstorpool,
// it errors out with normal zpool create. To avoid that, we go for forceful create.
createAttr = append(createAttr, "create", "-f")
if cStorPool.Spec.PoolSpec.CacheFile != "" {
cachefile := "cachefile=" + cStorPool.Spec.PoolSpec.CacheFile
createAttr = append(createAttr, "-o", cachefile)
}
openebsPoolname := "io.openebs:poolname=" + cStorPool.Name
createAttr = append(createAttr, "-O", openebsPoolname)
poolNameUID := string(PoolPrefix) + string(cStorPool.ObjectMeta.UID)
createAttr = append(createAttr, poolNameUID)
// To generate mirror disk0 disk1 mirror disk2 disk3 format.
for i, disk := range cStorPool.Spec.Disks.DiskList {
if cStorPool.Spec.PoolSpec.PoolType == "mirror" && i%2 == 0 {
createAttr = append(createAttr, "mirror")
}
createAttr = append(createAttr, disk)
}
return createAttr
}
// CheckValidPool checks for validity of CStorPool resource.
func CheckValidPool(cStorPool *apis.CStorPool) error {
if len(string(cStorPool.ObjectMeta.UID)) == 0 {
return fmt.Errorf("Poolname/UID cannot be empty")
}
if len(cStorPool.Spec.Disks.DiskList) < 1 {
return fmt.Errorf("Disk name(s) cannot be empty")
}
if cStorPool.Spec.PoolSpec.PoolType == "mirror" &&
len(cStorPool.Spec.Disks.DiskList)%2 != 0 {
return fmt.Errorf("Mirror poolType needs even number of disks")
}
return nil
}
// GetPoolName return the pool already created.
func GetPoolName() ([]string, error) {
GetPoolStr := []string{"get", "-Hp", "name", "-o", "name"}
poolNameByte, err := RunnerVar.RunStdoutPipe(PoolOperator, GetPoolStr...)
if err != nil || len(string(poolNameByte)) == 0 {
return []string{}, err
}
noisyPoolName := string(poolNameByte)
sepNoisyPoolName := strings.Split(noisyPoolName, "\n")
var poolNames []string
for _, poolName := range sepNoisyPoolName {
poolName = strings.TrimSpace(poolName)
poolNames = append(poolNames, poolName)
}
return poolNames, nil
}
// DeletePool destroys the pool created.
func DeletePool(poolName string) error {
deletePoolStr := []string{"destroy", poolName}
stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, deletePoolStr...)
if err != nil {
glog.Errorf("Unable to delete pool: %v", string(stdoutStderr))
return err
}
return nil
}
// SetCachefile is to set the cachefile for pool.
func SetCachefile(cStorPool *apis.CStorPool) error {
poolNameUID := string(PoolPrefix) + string(cStorPool.ObjectMeta.UID)
setCachefileStr := []string{"set", "cachefile=" + cStorPool.Spec.PoolSpec.CacheFile,
poolNameUID}
stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, setCachefileStr...)
if err != nil {
glog.Errorf("Unable to set cachefile: %v", string(stdoutStderr))
return err
}
return nil
}
// CheckForZreplInitial is blocking call for checking status of zrepl in cstor-pool container.
func CheckForZreplInitial(ZreplRetryInterval time.Duration) {
for {
_, err := RunnerVar.RunCombinedOutput(PoolOperator, "status")
if err != nil {
time.Sleep(ZreplRetryInterval)
glog.Errorf("zpool status returned error in zrepl startup : %v", err)
glog.Infof("Waiting for zpool replication container to start...")
continue
}
break
}
}
// CheckForZreplContinuous is continuous health checker for status of zrepl in cstor-pool container.
func CheckForZreplContinuous(ZreplRetryInterval time.Duration) {
for {
out, err := RunnerVar.RunCombinedOutput(PoolOperator, "status")
if err == nil {
//even though we imported pool, it disappeared (may be due to zrepl container crashing).
// so we need to reimport.
if PoolAddEventHandled && strings.Contains(string(out), StatusNoPoolsAvailable) {
break
}
time.Sleep(ZreplRetryInterval)
continue
}
glog.Errorf("zpool status returned error in zrepl healthcheck : %v, out: %s", err, out)
break
}
}
// LabelClear is to clear zpool label on disks.
func LabelClear(disks []string) error {
var failLabelClear = false
for _, disk := range disks {
labelClearStr := []string{"labelclear", "-f", disk}
stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, labelClearStr...)
if err != nil {
glog.Errorf("Unable to clear label: %v, err = %v", string(stdoutStderr), err)
failLabelClear = true
}
}
if failLabelClear {
return fmt.Errorf("Unable to clear labels from the disks of the pool")
}
return nil
}
| 1 | 10,606 | These statuses might have been better off with some custom status type. However, good for now. | openebs-maya | go |
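A minimal sketch of the typed-status idea floated above (names here are hypothetical, not from the repository; the merged code keeps plain string constants):

// ZpoolStatus is a hypothetical string type for zpool health values,
// sketching the reviewer's suggestion.
type ZpoolStatus string

const (
	ZpoolStatusDegraded ZpoolStatus = "DEGRADED"
	ZpoolStatusFaulted  ZpoolStatus = "FAULTED"
	ZpoolStatusOffline  ZpoolStatus = "OFFLINE"
	ZpoolStatusOnline   ZpoolStatus = "ONLINE"
	ZpoolStatusRemoved  ZpoolStatus = "REMOVED"
	ZpoolStatusUnavail  ZpoolStatus = "UNAVAIL"
)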
@@ -165,9 +165,11 @@ func GetIPNetDeviceByName(ifaceName string) (v4IPNet *net.IPNet, v6IPNet *net.IP
if ipNet, ok := addr.(*net.IPNet); ok {
if ipNet.IP.IsGlobalUnicast() {
if ipNet.IP.To4() != nil {
+ if v4IPNet == nil {
+ v4IPNet = ipNet
+ }
+ } else if v6IPNet == nil {
v6IPNet = ipNet
- } else {
- v4IPNet = ipNet
}
}
} | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"crypto/sha1" // #nosec G505: not used for security purposes
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"strings"
"antrea.io/antrea/pkg/util/ip"
)
const (
interfaceNameLength = 15
interfacePrefixLength = 8
interfaceKeyLength = interfaceNameLength - (interfacePrefixLength + 1)
FamilyIPv4 uint8 = 4
FamilyIPv6 uint8 = 6
)
func generateInterfaceName(key string, name string, useHead bool) string {
hash := sha1.New() // #nosec G401: not used for security purposes
io.WriteString(hash, key)
interfaceKey := hex.EncodeToString(hash.Sum(nil))
prefix := name
if len(name) > interfacePrefixLength {
// We use Node/Pod name to generate the interface name,
// valid chars for Node/Pod name are ASCII letters from a to z,
// the digits from 0 to 9, and the hyphen (-).
// Hyphen (-) is the only char which will impact command-line interpretation
// if the interface name starts with one, so we remove it here.
if useHead {
prefix = strings.TrimLeft(name[:interfacePrefixLength], "-")
} else {
prefix = strings.TrimLeft(name[len(name)-interfacePrefixLength:], "-")
}
}
return fmt.Sprintf("%s-%s", prefix, interfaceKey[:interfaceKeyLength])
}
// GenerateContainerInterfaceKey generates a unique string for a Pod's
// interface as: container/<Container-ID>.
// We must use ContainerID instead of PodNamespace + PodName because there could
// be more than one container associated with the same Pod at some point.
// For example, when deleting a StatefulSet Pod with 0 second grace period, the
// Pod will be removed from the Kubernetes API very quickly and a new Pod will
// be created immediately, and kubelet may process the deletion of the previous
// Pod and the addition of the new Pod simultaneously.
func GenerateContainerInterfaceKey(containerID string) string {
return fmt.Sprintf("container/%s", containerID)
}
// GenerateNodeTunnelInterfaceKey generates a unique string for a Node's
// tunnel interface as: node/<Node-name>.
func GenerateNodeTunnelInterfaceKey(nodeName string) string {
return fmt.Sprintf("node/%s", nodeName)
}
// GenerateContainerInterfaceName generates a unique interface name using the
// Pod's namespace, name and containerID. The output should be deterministic (so that
// multiple calls to GenerateContainerInterfaceName with the same parameters
// return the same value). The output has the length of interfaceNameLength(15).
// The probability of collision should be neglectable.
func GenerateContainerInterfaceName(podName, podNamespace, containerID string) string {
// Use the podName as the prefix and the containerID as the hashing key.
// podNamespace is not used currently.
return generateInterfaceName(containerID, podName, true)
}
// GenerateNodeTunnelInterfaceName generates a unique interface name for the
// tunnel to the Node, using the Node's name.
func GenerateNodeTunnelInterfaceName(nodeName string) string {
return generateInterfaceName(GenerateNodeTunnelInterfaceKey(nodeName), nodeName, false)
}
type LinkNotFound struct {
error
}
func newLinkNotFoundError(name string) LinkNotFound {
return LinkNotFound{
fmt.Errorf("link %s not found", name),
}
}
func listenUnix(address string) (net.Listener, error) {
return net.Listen("unix", address)
}
func dialUnix(address string) (net.Conn, error) {
return net.Dial("unix", address)
}
// GetIPNetDeviceFromIP returns local IPs/masks and associated device from IP.
func GetIPNetDeviceFromIP(localIPs *ip.DualStackIPs) (v4IPNet *net.IPNet, v6IPNet *net.IPNet, iface *net.Interface, err error) {
linkList, err := net.Interfaces()
if err != nil {
return nil, nil, nil, err
}
// localIPs includes at most one IPv4 address and one IPv6 address. For each device in linkList, all its addresses
// are compared with IPs in localIPs. If found, the iface is set to the device and v4IPNet, v6IPNet are set to
// the matching addresses.
saveIface := func(current *net.Interface) error {
if iface != nil && iface.Index != current.Index {
return fmt.Errorf("IPs of localIPs should be on the same device")
}
iface = current
return nil
}
for i := range linkList {
addrList, err := linkList[i].Addrs()
if err != nil {
continue
}
for _, addr := range addrList {
if ipNet, ok := addr.(*net.IPNet); ok {
if ipNet.IP.Equal(localIPs.IPv4) {
if err := saveIface(&linkList[i]); err != nil {
return nil, nil, nil, err
}
v4IPNet = ipNet
} else if ipNet.IP.Equal(localIPs.IPv6) {
if err := saveIface(&linkList[i]); err != nil {
return nil, nil, nil, err
}
v6IPNet = ipNet
}
}
}
}
if iface == nil {
return nil, nil, nil, fmt.Errorf("unable to find local IPs and device")
}
return v4IPNet, v6IPNet, iface, nil
}
func GetIPNetDeviceByName(ifaceName string) (v4IPNet *net.IPNet, v6IPNet *net.IPNet, link *net.Interface, err error) {
link, err = net.InterfaceByName(ifaceName)
if err != nil {
return nil, nil, nil, err
}
addrList, err := link.Addrs()
if err != nil {
return nil, nil, nil, err
}
for _, addr := range addrList {
if ipNet, ok := addr.(*net.IPNet); ok {
if ipNet.IP.IsGlobalUnicast() {
if ipNet.IP.To4() != nil {
v6IPNet = ipNet
} else {
v4IPNet = ipNet
}
}
}
}
if v4IPNet != nil || v6IPNet != nil {
return v4IPNet, v6IPNet, link, nil
}
return nil, nil, nil, fmt.Errorf("unable to find local IP and device")
}
func GetIPv4Addr(ips []net.IP) net.IP {
for _, ip := range ips {
if ip.To4() != nil {
return ip
}
}
return nil
}
func GetIPWithFamily(ips []net.IP, addrFamily uint8) (net.IP, error) {
if addrFamily == FamilyIPv6 {
for _, ip := range ips {
if ip.To4() == nil {
return ip, nil
}
}
return nil, errors.New("no IP found with IPv6 AddressFamily")
}
for _, ip := range ips {
if ip.To4() != nil {
return ip, nil
}
}
return nil, errors.New("no IP found with IPv4 AddressFamily")
}
| 1 | 44,279 | Thanks for catching the bug. Here your change lets `v4IPNet` and `v6IPNet` take the first IPv4/IPv6 address in `addrList`. Maybe choosing the last one would also work and make the code simpler? What's your idea? @tnqn | antrea-io-antrea | go |
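The alternative floated above — keeping the last matching address instead of the first — would indeed need no nil checks; a hedged sketch of that loop in GetIPNetDeviceByName (hypothetical, not the merged code):

	// "Last address wins": later global unicast addresses simply overwrite
	// earlier ones, so no nil guards are needed.
	for _, addr := range addrList {
		if ipNet, ok := addr.(*net.IPNet); ok && ipNet.IP.IsGlobalUnicast() {
			if ipNet.IP.To4() != nil {
				v4IPNet = ipNet
			} else {
				v6IPNet = ipNet
			}
		}
	}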
@@ -680,8 +680,12 @@ class UserResource(object):
try:
return self.model.get_record(record_id)
except storage_exceptions.RecordNotFoundError:
- response = http_error(HTTPNotFound(),
- errno=ERRORS.INVALID_RESOURCE_ID)
+ detail_dict = {
+ "id": record_id,
+ "resource_name": "record"
+ }
+ response = http_error(HTTPNotFound(), errno=ERRORS.INVALID_RESOURCE_ID,
+ details=detail_dict)
raise response
def _add_timestamp_header(self, response, timestamp=None): | 1 | import re
import functools
import colander
import venusian
import six
from pyramid import exceptions as pyramid_exceptions
from pyramid.decorator import reify
from pyramid.security import Everyone
from pyramid.httpexceptions import (HTTPNotModified, HTTPPreconditionFailed,
HTTPNotFound, HTTPServiceUnavailable)
from kinto.core import logger
from kinto.core import Service
from kinto.core.errors import http_error, raise_invalid, send_alert, ERRORS
from kinto.core.events import ACTIONS
from kinto.core.storage import exceptions as storage_exceptions, Filter, Sort
from kinto.core.utils import (
COMPARISON, classname, native_value, decode64, encode64, json,
encode_header, decode_header, dict_subset, recursive_update_dict
)
from .model import Model, ShareableModel
from .schema import ResourceSchema
from .viewset import ViewSet, ShareableViewSet
def register(depth=1, **kwargs):
"""Ressource class decorator.
Register the decorated class in the cornice registry.
Pass all its keyword arguments to the register_resource
function.
"""
def wrapped(resource):
register_resource(resource, depth=depth + 1, **kwargs)
return resource
return wrapped
def register_resource(resource_cls, settings=None, viewset=None, depth=1,
**kwargs):
"""Register a resource in the cornice registry.
:param resource_cls:
The resource class to register.
It should be a class or have a "name" attribute.
:param viewset:
A ViewSet object, which will be used to find out which arguments should
be appended to the views, and where the views are.
:param depth:
A depth offset. It will be used to determine what is the level of depth
in the call tree. (set to 1 by default.)
Any additional keyword parameters will be used to override the viewset
attributes.
"""
if viewset is None:
viewset = resource_cls.default_viewset(**kwargs)
else:
viewset.update(**kwargs)
resource_name = viewset.get_name(resource_cls)
def register_service(endpoint_type, settings):
"""Registers a service in cornice, for the given type.
"""
path_pattern = getattr(viewset, '%s_path' % endpoint_type)
path_values = {'resource_name': resource_name}
path = path_pattern.format(**path_values)
name = viewset.get_service_name(endpoint_type, resource_cls)
service = Service(name, path, depth=depth,
**viewset.get_service_arguments())
# Attach viewset and resource to the service for later reference.
service.viewset = viewset
service.resource = resource_cls
service.type = endpoint_type
# Attach collection and record paths.
service.collection_path = viewset.collection_path.format(**path_values)
service.record_path = (viewset.record_path.format(**path_values)
if viewset.record_path is not None else None)
methods = getattr(viewset, '%s_methods' % endpoint_type)
for method in methods:
if not viewset.is_endpoint_enabled(
endpoint_type, resource_name, method.lower(), settings):
continue
argument_getter = getattr(viewset, '%s_arguments' % endpoint_type)
view_args = argument_getter(resource_cls, method)
view = viewset.get_view(endpoint_type, method.lower())
service.add_view(method, view, klass=resource_cls, **view_args)
return service
def callback(context, name, ob):
        # get the callbacks registered by the inner services
# and call them from here when the @resource classes are being
# scanned by venusian.
config = context.config.with_package(info.module)
# Storage is mandatory for resources.
if not hasattr(config.registry, 'storage'):
msg = 'Mandatory storage backend is missing from configuration.'
raise pyramid_exceptions.ConfigurationError(msg)
# A service for the list.
service = register_service('collection', config.registry.settings)
config.add_cornice_service(service)
# An optional one for record endpoint.
if getattr(viewset, 'record_path') is not None:
service = register_service('record', config.registry.settings)
config.add_cornice_service(service)
info = venusian.attach(resource_cls, callback, category='pyramid', depth=depth)
return callback
class UserResource(object):
"""Base resource class providing every endpoint."""
default_viewset = ViewSet
"""Default :class:`kinto.core.resource.viewset.ViewSet` class to use when
the resource is registered."""
default_model = Model
"""Default :class:`kinto.core.resource.model.Model` class to use for
interacting the :mod:`kinto.core.storage` and :mod:`kinto.core.permission`
backends."""
mapping = ResourceSchema()
"""Schema to validate records."""
def __init__(self, request, context=None):
# Models are isolated by user.
parent_id = self.get_parent_id(request)
# Authentication to storage is transmitted as is (cf. cloud_storage).
auth = request.headers.get('Authorization')
# ID generator by resource name in settings.
default_id_generator = request.registry.id_generators['']
resource_name = context.resource_name if context else ''
id_generator = request.registry.id_generators.get(resource_name,
default_id_generator)
self.model = self.default_model(
storage=request.registry.storage,
id_generator=id_generator,
collection_id=classname(self),
parent_id=parent_id,
auth=auth)
self.request = request
self.context = context
self.record_id = self.request.matchdict.get('id')
self.force_patch_update = False
# Log resource context.
logger.bind(collection_id=self.model.collection_id,
collection_timestamp=self.timestamp)
@reify
def timestamp(self):
"""Return the current collection timestamp.
:rtype: int
"""
try:
return self.model.timestamp()
except storage_exceptions.BackendError as e:
is_readonly = self.request.registry.settings['readonly']
if not is_readonly:
raise e
# If the instance is configured to be readonly, and if the
# collection is empty, the backend will try to bump the timestamp.
# It fails if the configured db user has not write privileges.
logger.exception(e)
error_msg = ("Collection timestamp cannot be written. "
"Records endpoint must be hit at least once from a "
"writable instance.")
raise http_error(HTTPServiceUnavailable(),
errno=ERRORS.BACKEND,
message=error_msg)
def get_parent_id(self, request):
"""Return the parent_id of the resource with regards to the current
request.
:param request:
The request used to create the resource.
:rtype: str
"""
return request.prefixed_userid
def _get_known_fields(self):
"""Return all the `field` defined in the ressource mapping."""
known_fields = [c.name for c in self.mapping.children] + \
[self.model.id_field,
self.model.modified_field,
self.model.deleted_field]
return known_fields
def is_known_field(self, field):
"""Return ``True`` if `field` is defined in the resource schema.
If the resource schema allows unknown fields, this will always return
``True``.
:param str field: Field name
:rtype: bool
"""
if self.mapping.get_option('preserve_unknown'):
return True
known_fields = self._get_known_fields()
# Test first level only: ``target.data.id`` -> ``target``
field = field.split('.', 1)[0]
return field in known_fields
#
# End-points
#
def collection_get(self):
"""Model ``GET`` endpoint: retrieve multiple records.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` if
``If-None-Match`` header is provided and collection not
modified in the interim.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and collection modified
            in the interim.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
if filters or sorting are invalid.
"""
self._add_timestamp_header(self.request.response)
self._add_cache_header(self.request.response)
self._raise_304_if_not_modified()
self._raise_412_if_modified()
headers = self.request.response.headers
filters = self._extract_filters()
limit = self._extract_limit()
sorting = self._extract_sorting(limit)
partial_fields = self._extract_partial_fields()
filter_fields = [f.field for f in filters]
include_deleted = self.model.modified_field in filter_fields
pagination_rules, offset = self._extract_pagination_rules_from_token(
limit, sorting)
records, total_records = self.model.get_records(
filters=filters,
sorting=sorting,
limit=limit,
pagination_rules=pagination_rules,
include_deleted=include_deleted)
offset = offset + len(records)
next_page = None
if limit and len(records) == limit and offset < total_records:
lastrecord = records[-1]
next_page = self._next_page_url(sorting, limit, lastrecord, offset)
headers['Next-Page'] = encode_header(next_page)
if partial_fields:
records = [
dict_subset(record, partial_fields)
for record in records
]
# Bind metric about response size.
logger.bind(nb_records=len(records), limit=limit)
headers['Total-Records'] = encode_header('%s' % total_records)
return self.postprocess(records)
def collection_post(self):
"""Model ``POST`` endpoint: create a record.
If the new record id conflicts against an existing one, the
posted record is ignored, and the existing record is returned, with
a ``200`` status.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and collection modified
            in the interim.
.. seealso::
Add custom behaviour by overriding
:meth:`kinto.core.resource.UserResource.process_record`
"""
new_record = self.request.validated.get('data', {})
try:
# Since ``id`` does not belong to schema, it is not in validated
# data. Must look up in body.
id_field = self.model.id_field
new_record[id_field] = _id = self.request.json['data'][id_field]
self._raise_400_if_invalid_id(_id)
existing = self._get_record_or_404(_id)
except (HTTPNotFound, KeyError, ValueError):
existing = None
self._raise_412_if_modified(record=existing)
if existing:
record = existing
action = ACTIONS.READ
else:
new_record = self.process_record(new_record)
record = self.model.create_record(new_record)
self.request.response.status_code = 201
action = ACTIONS.CREATE
return self.postprocess(record, action=action)
def collection_delete(self):
"""Model ``DELETE`` endpoint: delete multiple records.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and collection modified
            in the interim.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
if filters are invalid.
"""
self._raise_412_if_modified()
filters = self._extract_filters()
records, _ = self.model.get_records(filters=filters)
deleted = self.model.delete_records(filters=filters)
action = len(deleted) > 0 and ACTIONS.DELETE or ACTIONS.READ
return self.postprocess(deleted, action=action, old=records)
def get(self):
"""Record ``GET`` endpoint: retrieve a record.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` if
``If-None-Match`` header is provided and record not
modified in the interim.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
            in the interim.
"""
self._raise_400_if_invalid_id(self.record_id)
record = self._get_record_or_404(self.record_id)
timestamp = record[self.model.modified_field]
self._add_timestamp_header(self.request.response, timestamp=timestamp)
self._add_cache_header(self.request.response)
self._raise_304_if_not_modified(record)
self._raise_412_if_modified(record)
partial_fields = self._extract_partial_fields()
if partial_fields:
record = dict_subset(record, partial_fields)
return self.postprocess(record)
def put(self):
"""Record ``PUT`` endpoint: create or replace the provided record and
return it.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
            in the interim.
.. note::
If ``If-None-Match: *`` request header is provided, the
``PUT`` will succeed only if no record exists with this id.
.. seealso::
Add custom behaviour by overriding
:meth:`kinto.core.resource.UserResource.process_record`.
"""
self._raise_400_if_invalid_id(self.record_id)
id_field = self.model.id_field
existing = None
tombstones = None
try:
existing = self._get_record_or_404(self.record_id)
except HTTPNotFound:
# Look if this record used to exist (for preconditions check).
filter_by_id = Filter(id_field, self.record_id, COMPARISON.EQ)
tombstones, _ = self.model.get_records(filters=[filter_by_id],
include_deleted=True)
if len(tombstones) > 0:
existing = tombstones[0]
finally:
if existing:
self._raise_412_if_modified(existing)
# If `data` is not provided, use existing record (or empty if creation)
post_record = self.request.validated.get('data', existing) or {}
record_id = post_record.setdefault(id_field, self.record_id)
self._raise_400_if_id_mismatch(record_id, self.record_id)
new_record = self.process_record(post_record, old=existing)
if existing and not tombstones:
record = self.model.update_record(new_record)
else:
record = self.model.create_record(new_record)
self.request.response.status_code = 201
timestamp = record[self.model.modified_field]
self._add_timestamp_header(self.request.response, timestamp=timestamp)
action = existing and ACTIONS.UPDATE or ACTIONS.CREATE
return self.postprocess(record, action=action, old=existing)
def patch(self):
"""Record ``PATCH`` endpoint: modify a record and return its
new version.
If a request header ``Response-Behavior`` is set to ``light``,
only the fields whose value was changed are returned.
If set to ``diff``, only the fields whose value became different than
the one provided are returned.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
            in the interim.
.. seealso::
Add custom behaviour by overriding
:meth:`kinto.core.resource.UserResource.apply_changes` or
:meth:`kinto.core.resource.UserResource.process_record`.
"""
self._raise_400_if_invalid_id(self.record_id)
existing = self._get_record_or_404(self.record_id)
self._raise_412_if_modified(existing)
try:
# `data` attribute may not be present if only perms are patched.
changes = self.request.json.get('data', {})
except ValueError:
# If no `data` nor `permissions` is provided in patch, reject!
# XXX: This should happen in schema instead (c.f. ShareableViewSet)
error_details = {
'name': 'data',
'description': 'Provide at least one of data or permissions',
}
raise_invalid(self.request, **error_details)
updated = self.apply_changes(existing, changes=changes)
record_id = updated.setdefault(self.model.id_field,
self.record_id)
self._raise_400_if_id_mismatch(record_id, self.record_id)
new_record = self.process_record(updated, old=existing)
changed_fields = [k for k in changes.keys()
if existing.get(k) != new_record.get(k)]
# Save in storage if necessary.
if changed_fields or self.force_patch_update:
new_record = self.model.update_record(new_record)
else:
# Behave as if storage would have added `id` and `last_modified`.
for extra_field in [self.model.modified_field,
self.model.id_field]:
new_record[extra_field] = existing[extra_field]
# Adjust response according to ``Response-Behavior`` header
body_behavior = self.request.headers.get('Response-Behavior', 'full')
if body_behavior.lower() == 'light':
# Only fields that were changed.
data = {k: new_record[k] for k in changed_fields}
elif body_behavior.lower() == 'diff':
# Only fields that are different from those provided.
data = {k: new_record[k] for k in changed_fields
if changes.get(k) != new_record.get(k)}
else:
data = new_record
timestamp = new_record.get(self.model.modified_field,
existing[self.model.modified_field])
self._add_timestamp_header(self.request.response, timestamp=timestamp)
return self.postprocess(data, action=ACTIONS.UPDATE, old=existing)
def delete(self):
"""Record ``DELETE`` endpoint: delete a record and return it.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
            in the interim.
"""
self._raise_400_if_invalid_id(self.record_id)
record = self._get_record_or_404(self.record_id)
self._raise_412_if_modified(record)
        # Retrieve the last_modified information from a querystring if present.
last_modified = self.request.GET.get('last_modified')
if last_modified:
last_modified = native_value(last_modified.strip('"'))
if not isinstance(last_modified, six.integer_types):
error_details = {
'name': 'last_modified',
'location': 'querystring',
'description': 'Invalid value for %s' % last_modified
}
raise_invalid(self.request, **error_details)
# If less or equal than current record. Ignore it.
if last_modified <= record[self.model.modified_field]:
last_modified = None
deleted = self.model.delete_record(record, last_modified=last_modified)
return self.postprocess(deleted, action=ACTIONS.DELETE, old=record)
#
# Data processing
#
def process_record(self, new, old=None):
"""Hook for processing records before they reach storage, to introduce
specific logics on fields for example.
.. code-block:: python
def process_record(self, new, old=None):
new = super(MyResource, self).process_record(new, old)
version = old['version'] if old else 0
new['version'] = version + 1
return new
Or add extra validation based on request:
.. code-block:: python
from kinto.core.errors import raise_invalid
def process_record(self, new, old=None):
new = super(MyResource, self).process_record(new, old)
if new['browser'] not in request.headers['User-Agent']:
raise_invalid(self.request, name='browser', error='Wrong')
return new
:param dict new: the validated record to be created or updated.
:param dict old: the old record to be updated,
``None`` for creation endpoints.
:returns: the processed record.
:rtype: dict
"""
modified_field = self.model.modified_field
new_last_modified = new.get(modified_field)
# Drop the new last_modified if it is not an integer.
is_integer = isinstance(new_last_modified, int)
if not is_integer:
new.pop(modified_field, None)
return new
# Drop the new last_modified if lesser or equal to the old one.
is_less_or_equal = (old is not None and
new_last_modified <= old[modified_field])
if is_less_or_equal:
new.pop(modified_field, None)
return new
def apply_changes(self, record, changes):
"""Merge `changes` into `record` fields.
.. note::
This is used in the context of PATCH only.
Override this to control field changes at record level, for example:
.. code-block:: python
def apply_changes(self, record, changes):
# Ignore value change if inferior
if record['position'] > changes.get('position', -1):
changes.pop('position', None)
return super(MyResource, self).apply_changes(record, changes)
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
if result does not comply with resource schema.
:returns: the new record with `changes` applied.
:rtype: dict
"""
for field, value in changes.items():
has_changed = record.get(field, value) != value
if self.mapping.is_readonly(field) and has_changed:
error_details = {
'name': field,
'description': 'Cannot modify {0}'.format(field)
}
raise_invalid(self.request, **error_details)
updated = record.copy()
# recursive patch and remove field if null attribute is passed (RFC 7396)
content_type = str(self.request.headers.get('Content-Type'))
if content_type == 'application/merge-patch+json':
recursive_update_dict(updated, changes, ignores=[None])
else:
updated.update(**changes)
try:
return self.mapping.deserialize(updated)
except colander.Invalid as e:
# Transform the errors we got from colander into Cornice errors.
# We could not rely on Service schema because the record should be
# validated only once the changes are applied
for field, error in e.asdict().items():
raise_invalid(self.request, name=field, description=error)
def postprocess(self, result, action=ACTIONS.READ, old=None):
body = {
'data': result
}
parent_id = self.get_parent_id(self.request)
self.request.notify_resource_event(parent_id=parent_id,
timestamp=self.timestamp,
data=result,
action=action,
old=old)
return body
#
# Internals
#
def _get_record_or_404(self, record_id):
"""Retrieve record from storage and raise ``404 Not found`` if missing.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
"""
if self.context and self.context.current_record:
# Set during authorization. Save a storage hit.
return self.context.current_record
try:
return self.model.get_record(record_id)
except storage_exceptions.RecordNotFoundError:
response = http_error(HTTPNotFound(),
errno=ERRORS.INVALID_RESOURCE_ID)
raise response
def _add_timestamp_header(self, response, timestamp=None):
"""Add current timestamp in response headers, when request comes in.
"""
if timestamp is None:
timestamp = self.timestamp
# Pyramid takes care of converting.
response.last_modified = timestamp / 1000.0
# Return timestamp as ETag.
response.headers['ETag'] = encode_header('"%s"' % timestamp)
def _add_cache_header(self, response):
"""Add Cache-Control and Expire headers, based a on a setting for the
current resource.
Cache headers will be set with anonymous requests only.
.. note::
The ``Cache-Control: no-cache`` response header does not prevent
caching in client. It will indicate the client to revalidate
the response content on each access. The client will send a
conditional request to the server and check that a
``304 Not modified`` is returned before serving content from cache.
"""
resource_name = self.context.resource_name if self.context else ''
setting_key = '%s_cache_expires_seconds' % resource_name
collection_expires = self.request.registry.settings.get(setting_key)
is_anonymous = self.request.prefixed_userid is None
if collection_expires and is_anonymous:
response.cache_expires(seconds=int(collection_expires))
else:
# Since `Expires` response header provides an HTTP data with a
# resolution in seconds, do not use Pyramid `cache_expires()` in
# order to omit it.
response.cache_control.no_cache = True
response.cache_control.no_store = True
def _raise_400_if_invalid_id(self, record_id):
"""Raise 400 if specified record id does not match the format excepted
by storage backends.
:raises: :class:`pyramid.httpexceptions.HTTPBadRequest`
"""
is_string = isinstance(record_id, six.string_types)
if not is_string or not self.model.id_generator.match(record_id):
error_details = {
'location': 'path',
'description': "Invalid record id"
}
raise_invalid(self.request, **error_details)
def _raise_304_if_not_modified(self, record=None):
"""Raise 304 if current timestamp is inferior to the one specified
in headers.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified`
"""
if_none_match = self.request.headers.get('If-None-Match')
if not if_none_match:
return
if_none_match = decode_header(if_none_match)
try:
if not (if_none_match[0] == if_none_match[-1] == '"'):
raise ValueError()
modified_since = int(if_none_match[1:-1])
except (IndexError, ValueError):
if if_none_match == '*':
return
error_details = {
'location': 'headers',
'description': "Invalid value for If-None-Match"
}
raise_invalid(self.request, **error_details)
if record:
current_timestamp = record[self.model.modified_field]
else:
current_timestamp = self.model.timestamp()
if current_timestamp <= modified_since:
response = HTTPNotModified()
self._add_timestamp_header(response, timestamp=current_timestamp)
raise response
def _raise_412_if_modified(self, record=None):
"""Raise 412 if current timestamp is superior to the one
specified in headers.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed`
"""
if_match = self.request.headers.get('If-Match')
if_none_match = self.request.headers.get('If-None-Match')
if not if_match and not if_none_match:
return
if_match = decode_header(if_match) if if_match else None
if record and if_none_match and decode_header(if_none_match) == '*':
if record.get(self.model.deleted_field, False):
# Tombstones should not prevent creation.
return
modified_since = -1 # Always raise.
elif if_match:
try:
if not (if_match[0] == if_match[-1] == '"'):
raise ValueError()
modified_since = int(if_match[1:-1])
except (IndexError, ValueError):
message = ("Invalid value for If-Match. The value should "
"be integer between double quotes.")
error_details = {
'location': 'headers',
'description': message
}
raise_invalid(self.request, **error_details)
else:
# In case _raise_304_if_not_modified() did not raise.
return
if record:
current_timestamp = record[self.model.modified_field]
else:
current_timestamp = self.model.timestamp()
if current_timestamp > modified_since:
error_msg = 'Resource was modified meanwhile'
details = {'existing': record} if record else {}
response = http_error(HTTPPreconditionFailed(),
errno=ERRORS.MODIFIED_MEANWHILE,
message=error_msg,
details=details)
self._add_timestamp_header(response, timestamp=current_timestamp)
raise response
def _raise_400_if_id_mismatch(self, new_id, record_id):
"""Raise 400 if the `new_id`, within the request body, does not match
the `record_id`, obtained from request path.
:raises: :class:`pyramid.httpexceptions.HTTPBadRequest`
"""
if new_id != record_id:
error_msg = 'Record id does not match existing record'
error_details = {
'name': self.model.id_field,
'description': error_msg
}
raise_invalid(self.request, **error_details)
def _extract_partial_fields(self):
"""Extract the fields to do the projection from QueryString parameters.
"""
fields = self.request.GET.get('_fields', None)
if fields:
fields = fields.split(',')
root_fields = [f.split('.')[0] for f in fields]
known_fields = self._get_known_fields()
invalid_fields = set(root_fields) - set(known_fields)
preserve_unknown = self.mapping.get_option('preserve_unknown')
if not preserve_unknown and invalid_fields:
error_msg = "Fields %s do not exist" % ','.join(invalid_fields)
error_details = {
'name': "Invalid _fields parameter",
'description': error_msg
}
raise_invalid(self.request, **error_details)
# Since id and last_modified are part of the synchronisation
# API, force their presence in payloads.
fields = fields + [self.model.id_field, self.model.modified_field]
return fields
def _extract_limit(self):
"""Extract limit value from QueryString parameters."""
paginate_by = self.request.registry.settings['paginate_by']
limit = self.request.GET.get('_limit', paginate_by)
if limit:
try:
limit = int(limit)
except ValueError:
error_details = {
'location': 'querystring',
'description': "_limit should be an integer"
}
raise_invalid(self.request, **error_details)
# If limit is higher than paginate_by setting, ignore it.
if limit and paginate_by:
limit = min(limit, paginate_by)
return limit
def _extract_filters(self, queryparams=None):
"""Extracts filters from QueryString parameters."""
if not queryparams:
queryparams = self.request.GET
filters = []
for param, paramvalue in queryparams.items():
param = param.strip()
error_details = {
'name': param,
'location': 'querystring',
'description': 'Invalid value for %s' % param
}
# Ignore specific fields
if param.startswith('_') and param not in ('_since',
'_to',
'_before'):
continue
# Handle the _since specific filter.
if param in ('_since', '_to', '_before'):
value = native_value(paramvalue.strip('"'))
if not isinstance(value, six.integer_types):
raise_invalid(self.request, **error_details)
if param == '_since':
operator = COMPARISON.GT
else:
if param == '_to':
message = ('_to is now deprecated, '
'you should use _before instead')
url = ('https://kinto.readthedocs.io/en/2.4.0/api/'
'resource.html#list-of-available-url-'
'parameters')
send_alert(self.request, message, url)
operator = COMPARISON.LT
filters.append(
Filter(self.model.modified_field, value, operator)
)
continue
allKeywords = '|'.join([i.name.lower() for i in COMPARISON])
m = re.match(r'^('+allKeywords+')_([\w\.]+)$', param)
if m:
keyword, field = m.groups()
operator = getattr(COMPARISON, keyword.upper())
else:
operator, field = COMPARISON.EQ, param
if not self.is_known_field(field):
error_msg = "Unknown filter field '{0}'".format(param)
error_details['description'] = error_msg
raise_invalid(self.request, **error_details)
value = native_value(paramvalue)
if operator in (COMPARISON.IN, COMPARISON.EXCLUDE):
value = set([native_value(v) for v in paramvalue.split(',')])
all_integers = all([isinstance(v, six.integer_types)
for v in value])
all_strings = all([isinstance(v, six.text_type)
for v in value])
has_invalid_value = (
(field == self.model.id_field and not all_strings) or
(field == self.model.modified_field and not all_integers)
)
if has_invalid_value:
raise_invalid(self.request, **error_details)
filters.append(Filter(field, value, operator))
return filters
def _extract_sorting(self, limit):
"""Extracts filters from QueryString parameters."""
specified = self.request.GET.get('_sort', '').split(',')
sorting = []
modified_field_used = self.model.modified_field in specified
for field in specified:
field = field.strip()
m = re.match(r'^([\-+]?)([\w\.]+)$', field)
if m:
order, field = m.groups()
if not self.is_known_field(field):
error_details = {
'location': 'querystring',
'description': "Unknown sort field '{0}'".format(field)
}
raise_invalid(self.request, **error_details)
direction = -1 if order == '-' else 1
sorting.append(Sort(field, direction))
if not modified_field_used:
# Add a sort by the ``modified_field`` in descending order
# useful for pagination
sorting.append(Sort(self.model.modified_field, -1))
return sorting
def _build_pagination_rules(self, sorting, last_record, rules=None):
"""Return the list of rules for a given sorting attribute and
last_record.
"""
if rules is None:
rules = []
rule = []
next_sorting = sorting[:-1]
for field, _ in next_sorting:
rule.append(Filter(field, last_record.get(field), COMPARISON.EQ))
field, direction = sorting[-1]
if direction == -1:
rule.append(Filter(field, last_record.get(field), COMPARISON.LT))
else:
rule.append(Filter(field, last_record.get(field), COMPARISON.GT))
rules.append(rule)
if len(next_sorting) == 0:
return rules
return self._build_pagination_rules(next_sorting, last_record, rules)
def _extract_pagination_rules_from_token(self, limit, sorting):
"""Get pagination params."""
queryparams = self.request.GET
token = queryparams.get('_token', None)
filters = []
offset = 0
if token:
try:
tokeninfo = json.loads(decode64(token))
if not isinstance(tokeninfo, dict):
raise ValueError()
last_record = tokeninfo['last_record']
offset = tokeninfo['offset']
except (ValueError, KeyError, TypeError):
error_msg = '_token has invalid content'
error_details = {
'location': 'querystring',
'description': error_msg
}
raise_invalid(self.request, **error_details)
filters = self._build_pagination_rules(sorting, last_record)
return filters, offset
def _next_page_url(self, sorting, limit, last_record, offset):
"""Build the Next-Page header from where we stopped."""
token = self._build_pagination_token(sorting, last_record, offset)
params = self.request.GET.copy()
params['_limit'] = limit
params['_token'] = token
service = self.request.current_service
next_page_url = self.request.route_url(service.name, _query=params,
**self.request.matchdict)
return next_page_url
def _build_pagination_token(self, sorting, last_record, offset):
"""Build a pagination token.
It is a base64-encoded JSON object with the sorting field values of
the last_record.
"""
token = {
'last_record': {},
'offset': offset
}
for field, _ in sorting:
token['last_record'][field] = last_record[field]
return encode64(json.dumps(token))
class ShareableResource(UserResource):
"""Shareable resources allow to set permissions on records, in order to
share their access or protect their modification.
"""
default_model = ShareableModel
default_viewset = ShareableViewSet
permissions = ('read', 'write')
"""List of allowed permissions names."""
def __init__(self, *args, **kwargs):
super(ShareableResource, self).__init__(*args, **kwargs)
# In base resource, PATCH only hit storage if no data has changed.
# Here, we force update because we add the current principal to
# the ``write`` ACE.
self.force_patch_update = True
# Required by the ShareableModel class.
self.model.permission = self.request.registry.permission
if self.request.prefixed_userid is None:
# The principal of an anonymous user is system.Everyone
self.model.current_principal = Everyone
else:
self.model.current_principal = self.request.prefixed_userid
self.model.effective_principals = self.request.effective_principals
if self.context:
self.model.get_permission_object_id = functools.partial(
self.context.get_permission_object_id,
self.request)
def get_parent_id(self, request):
"""Unlike :class:`kinto.core.resource.UserResource`, records are not
isolated by user.
See https://github.com/mozilla-services/cliquet/issues/549
:returns: A constant empty value.
"""
return ''
def _extract_filters(self, queryparams=None):
"""Override default filters extraction from QueryString to allow
partial collection of records.
XXX: find more elegant approach to add custom filters.
"""
filters = super(ShareableResource, self)._extract_filters(queryparams)
ids = self.context.shared_ids
if ids is not None:
filter_by_id = Filter(self.model.id_field, ids, COMPARISON.IN)
filters.insert(0, filter_by_id)
return filters
def _raise_412_if_modified(self, record=None):
"""Do not provide the permissions among the record fields.
Ref: https://github.com/Kinto/kinto/issues/224
"""
if record:
record = record.copy()
record.pop(self.model.permissions_field, None)
return super(ShareableResource, self)._raise_412_if_modified(record)
def process_record(self, new, old=None):
"""Read permissions from request body, and in the case of ``PUT`` every
existing ACE is removed (using empty list).
"""
new = super(ShareableResource, self).process_record(new, old)
permissions = self.request.validated.get('permissions', {})
annotated = new.copy()
if permissions:
is_put = (self.request.method.lower() == 'put')
if is_put:
# Remove every existing ACEs using empty lists.
for perm in self.permissions:
permissions.setdefault(perm, [])
annotated[self.model.permissions_field] = permissions
return annotated
def postprocess(self, result, action=ACTIONS.READ, old=None):
"""Add ``permissions`` attribute in response body.
In the HTTP API, it was decided that ``permissions`` would reside
outside the ``data`` attribute.
"""
body = {}
if not isinstance(result, list):
# record endpoint.
perms = result.pop(self.model.permissions_field, None)
if perms is not None:
body['permissions'] = {k: list(p) for k, p in perms.items()}
if old:
# Remove permissions from event payload.
old.pop(self.model.permissions_field, None)
data = super(ShareableResource, self).postprocess(result, action, old)
body.update(data)
return body
| 1 | 9,971 | nitpick: you can name this variable just `details` | Kinto-kinto | py |
@@ -0,0 +1,18 @@
+#include "general.h"
+#include "parse.h"
+
+static void installJadeParser (const langType language)
+{
+ addTagRegex(language, "^mixin[ \t]*([a-sA-Z0-9_]+)", "\\1", "d,definition", NULL);
+}
+
+
+extern parserDefinition* JadeParser (void)
+{
+ static const char* extensions[] = { "jade", NULL };
+ parserDefinition* def = parserNew("Jade");
+ def->extensions = extensions;
+ def->initialize = installJadeParser;
+ def->method = METHOD_REGEX;
+ return def;
+} | 1 | 1 | 11,376 | The character class in the regex should probably be "[a-zA-Z0-9_]". | universal-ctags-ctags | c |
|
@@ -290,12 +290,6 @@ public class GlobalSettings {
s.put("pgpSignOnlyDialogCounter", Settings.versions(
new V(45, new IntegerRangeSetting(0, Integer.MAX_VALUE, 0))
));
- s.put("openPgpProvider", Settings.versions(
- new V(46, new StringSetting(K9.NO_OPENPGP_PROVIDER))
- ));
- s.put("openPgpSupportSignOnly", Settings.versions(
- new V(47, new BooleanSetting(false))
- ));
s.put("fontSizeMessageViewBCC", Settings.versions(
new V(48, new FontSizeSetting(FontSizes.FONT_DEFAULT))
)); | 1 | package com.fsck.k9.preferences;
import java.io.File;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import android.os.Environment;
import com.fsck.k9.Account;
import com.fsck.k9.Account.SortType;
import com.fsck.k9.FontSizes;
import com.fsck.k9.K9;
import com.fsck.k9.K9.NotificationHideSubject;
import com.fsck.k9.K9.NotificationQuickDelete;
import com.fsck.k9.K9.SplitViewMode;
import com.fsck.k9.K9.Theme;
import com.fsck.k9.R;
import com.fsck.k9.preferences.Settings.BooleanSetting;
import com.fsck.k9.preferences.Settings.ColorSetting;
import com.fsck.k9.preferences.Settings.EnumSetting;
import com.fsck.k9.preferences.Settings.FontSizeSetting;
import com.fsck.k9.preferences.Settings.IntegerRangeSetting;
import com.fsck.k9.preferences.Settings.InvalidSettingValueException;
import com.fsck.k9.preferences.Settings.PseudoEnumSetting;
import com.fsck.k9.preferences.Settings.SettingsDescription;
import com.fsck.k9.preferences.Settings.SettingsUpgrader;
import com.fsck.k9.preferences.Settings.StringSetting;
import com.fsck.k9.preferences.Settings.V;
import com.fsck.k9.preferences.Settings.WebFontSizeSetting;
import static com.fsck.k9.K9.LockScreenNotificationVisibility;
public class GlobalSettings {
static final Map<String, TreeMap<Integer, SettingsDescription>> SETTINGS;
private static final Map<Integer, SettingsUpgrader> UPGRADERS;
static {
Map<String, TreeMap<Integer, SettingsDescription>> s = new LinkedHashMap<>();
/*
* When adding new settings here, be sure to increment {@link Settings.VERSION}
* and use that for whatever you add here.
*/
s.put("animations", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("attachmentdefaultpath", Settings.versions(
new V(1, new DirectorySetting(Environment.getExternalStorageDirectory())),
new V(41, new DirectorySetting(Environment.getExternalStoragePublicDirectory(
Environment.DIRECTORY_DOWNLOADS)))
));
s.put("backgroundOperations", Settings.versions(
new V(1, new EnumSetting<>(K9.BACKGROUND_OPS.class, K9.BACKGROUND_OPS.WHEN_CHECKED_AUTO_SYNC))
));
s.put("changeRegisteredNameColor", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("confirmDelete", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("confirmDeleteStarred", Settings.versions(
new V(2, new BooleanSetting(false))
));
s.put("confirmSpam", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("confirmMarkAllRead", Settings.versions(
new V(44, new BooleanSetting(true))
));
s.put("countSearchMessages", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("enableDebugLogging", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("enableSensitiveLogging", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("fontSizeAccountDescription", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeAccountName", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeFolderName", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeFolderStatus", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageComposeInput", Settings.versions(
new V(5, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageListDate", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageListPreview", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageListSender", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageListSubject", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageViewAdditionalHeaders", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageViewCC", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageViewContent", Settings.versions(
new V(1, new WebFontSizeSetting(3)),
new V(31, null)
));
s.put("fontSizeMessageViewDate", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageViewSender", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageViewSubject", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageViewTime", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("fontSizeMessageViewTo", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("gesturesEnabled", Settings.versions(
new V(1, new BooleanSetting(true)),
new V(4, new BooleanSetting(false))
));
s.put("hideSpecialAccounts", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("keyguardPrivacy", Settings.versions(
new V(1, new BooleanSetting(false)),
new V(12, null)
));
s.put("language", Settings.versions(
new V(1, new LanguageSetting())
));
s.put("measureAccounts", Settings.versions(
new V(1, new BooleanSetting(true))
));
s.put("messageListCheckboxes", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("messageListPreviewLines", Settings.versions(
new V(1, new IntegerRangeSetting(1, 100, 2))
));
s.put("messageListStars", Settings.versions(
new V(1, new BooleanSetting(true))
));
s.put("messageViewFixedWidthFont", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("messageViewReturnToList", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("messageViewShowNext", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("quietTimeEnabled", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("quietTimeEnds", Settings.versions(
new V(1, new TimeSetting("7:00"))
));
s.put("quietTimeStarts", Settings.versions(
new V(1, new TimeSetting("21:00"))
));
s.put("registeredNameColor", Settings.versions(
new V(1, new ColorSetting(0xFF00008F))
));
s.put("showContactName", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("showCorrespondentNames", Settings.versions(
new V(1, new BooleanSetting(true))
));
s.put("sortTypeEnum", Settings.versions(
new V(10, new EnumSetting<>(SortType.class, Account.DEFAULT_SORT_TYPE))
));
s.put("sortAscending", Settings.versions(
new V(10, new BooleanSetting(Account.DEFAULT_SORT_ASCENDING))
));
s.put("startIntegratedInbox", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("theme", Settings.versions(
new V(1, new ThemeSetting(K9.Theme.LIGHT))
));
s.put("messageViewTheme", Settings.versions(
new V(16, new ThemeSetting(K9.Theme.LIGHT)),
new V(24, new SubThemeSetting(K9.Theme.USE_GLOBAL))
));
s.put("useVolumeKeysForListNavigation", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("useVolumeKeysForNavigation", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("wrapFolderNames", Settings.versions(
new V(22, new BooleanSetting(false))
));
s.put("notificationHideSubject", Settings.versions(
new V(12, new EnumSetting<>(NotificationHideSubject.class, NotificationHideSubject.NEVER))
));
s.put("useBackgroundAsUnreadIndicator", Settings.versions(
new V(19, new BooleanSetting(true))
));
s.put("threadedView", Settings.versions(
new V(20, new BooleanSetting(true))
));
s.put("splitViewMode", Settings.versions(
new V(23, new EnumSetting<>(SplitViewMode.class, SplitViewMode.NEVER))
));
s.put("messageComposeTheme", Settings.versions(
new V(24, new SubThemeSetting(K9.Theme.USE_GLOBAL))
));
s.put("fixedMessageViewTheme", Settings.versions(
new V(24, new BooleanSetting(true))
));
s.put("showContactPicture", Settings.versions(
new V(25, new BooleanSetting(true))
));
s.put("autofitWidth", Settings.versions(
new V(28, new BooleanSetting(true))
));
s.put("colorizeMissingContactPictures", Settings.versions(
new V(29, new BooleanSetting(true))
));
s.put("messageViewDeleteActionVisible", Settings.versions(
new V(30, new BooleanSetting(true))
));
s.put("messageViewArchiveActionVisible", Settings.versions(
new V(30, new BooleanSetting(false))
));
s.put("messageViewMoveActionVisible", Settings.versions(
new V(30, new BooleanSetting(false))
));
s.put("messageViewCopyActionVisible", Settings.versions(
new V(30, new BooleanSetting(false))
));
s.put("messageViewSpamActionVisible", Settings.versions(
new V(30, new BooleanSetting(false))
));
s.put("fontSizeMessageViewContentPercent", Settings.versions(
new V(31, new IntegerRangeSetting(40, 250, 100))
));
s.put("hideUserAgent", Settings.versions(
new V(32, new BooleanSetting(false))
));
s.put("hideTimeZone", Settings.versions(
new V(32, new BooleanSetting(false))
));
s.put("lockScreenNotificationVisibility", Settings.versions(
new V(37, new EnumSetting<>(LockScreenNotificationVisibility.class,
LockScreenNotificationVisibility.MESSAGE_COUNT))
));
s.put("confirmDeleteFromNotification", Settings.versions(
new V(38, new BooleanSetting(true))
));
s.put("messageListSenderAboveSubject", Settings.versions(
new V(38, new BooleanSetting(false))
));
s.put("notificationQuickDelete", Settings.versions(
new V(38, new EnumSetting<>(NotificationQuickDelete.class, NotificationQuickDelete.NEVER))
));
s.put("notificationDuringQuietTimeEnabled", Settings.versions(
new V(39, new BooleanSetting(true))
));
s.put("confirmDiscardMessage", Settings.versions(
new V(40, new BooleanSetting(true))
));
s.put("pgpInlineDialogCounter", Settings.versions(
new V(43, new IntegerRangeSetting(0, Integer.MAX_VALUE, 0))
));
s.put("pgpSignOnlyDialogCounter", Settings.versions(
new V(45, new IntegerRangeSetting(0, Integer.MAX_VALUE, 0))
));
s.put("openPgpProvider", Settings.versions(
new V(46, new StringSetting(K9.NO_OPENPGP_PROVIDER))
));
s.put("openPgpSupportSignOnly", Settings.versions(
new V(47, new BooleanSetting(false))
));
s.put("fontSizeMessageViewBCC", Settings.versions(
new V(48, new FontSizeSetting(FontSizes.FONT_DEFAULT))
));
s.put("hideHostnameWhenConnecting", Settings.versions(
new V(49, new BooleanSetting(false))
));
SETTINGS = Collections.unmodifiableMap(s);
Map<Integer, SettingsUpgrader> u = new HashMap<>();
u.put(12, new SettingsUpgraderV12());
u.put(24, new SettingsUpgraderV24());
u.put(31, new SettingsUpgraderV31());
UPGRADERS = Collections.unmodifiableMap(u);
}
static Map<String, Object> validate(int version, Map<String, String> importedSettings) {
return Settings.validate(version, SETTINGS, importedSettings, false);
}
public static Set<String> upgrade(int version, Map<String, Object> validatedSettings) {
return Settings.upgrade(version, UPGRADERS, SETTINGS, validatedSettings);
}
public static Map<String, String> convert(Map<String, Object> settings) {
return Settings.convert(settings, SETTINGS);
}
static Map<String, String> getGlobalSettings(Storage storage) {
Map<String, String> result = new HashMap<>();
for (String key : SETTINGS.keySet()) {
String value = storage.getString(key, null);
if (value != null) {
result.put(key, value);
}
}
return result;
}
/**
* Upgrades the settings from version 11 to 12
*
* Map the 'keyguardPrivacy' value to the new NotificationHideSubject enum.
*/
private static class SettingsUpgraderV12 implements SettingsUpgrader {
@Override
public Set<String> upgrade(Map<String, Object> settings) {
Boolean keyguardPrivacy = (Boolean) settings.get("keyguardPrivacy");
if (keyguardPrivacy != null && keyguardPrivacy) {
// current setting: only show subject when unlocked
settings.put("notificationHideSubject", NotificationHideSubject.WHEN_LOCKED);
} else {
// always show subject [old default]
settings.put("notificationHideSubject", NotificationHideSubject.NEVER);
}
return new HashSet<>(Collections.singletonList("keyguardPrivacy"));
}
}
/**
* Upgrades the settings from version 23 to 24.
*
* <p>
* Set <em>messageViewTheme</em> to {@link K9.Theme#USE_GLOBAL} if <em>messageViewTheme</em> has
* the same value as <em>theme</em>.
* </p>
*/
private static class SettingsUpgraderV24 implements SettingsUpgrader {
@Override
public Set<String> upgrade(Map<String, Object> settings) {
K9.Theme messageViewTheme = (K9.Theme) settings.get("messageViewTheme");
K9.Theme theme = (K9.Theme) settings.get("theme");
if (theme != null && messageViewTheme != null && theme == messageViewTheme) {
settings.put("messageViewTheme", K9.Theme.USE_GLOBAL);
}
return null;
}
}
/**
* Upgrades the settings from version 30 to 31.
*
* <p>
* Convert value from <em>fontSizeMessageViewContent</em> to
* <em>fontSizeMessageViewContentPercent</em>.
* </p>
*/
public static class SettingsUpgraderV31 implements SettingsUpgrader {
@Override
public Set<String> upgrade(Map<String, Object> settings) {
int oldSize = (Integer) settings.get("fontSizeMessageViewContent");
int newSize = convertFromOldSize(oldSize);
settings.put("fontSizeMessageViewContentPercent", newSize);
return new HashSet<>(Collections.singletonList("fontSizeMessageViewContent"));
}
public static int convertFromOldSize(int oldSize) {
switch (oldSize) {
case 1: {
return 40;
}
case 2: {
return 75;
}
case 4: {
return 175;
}
case 5: {
return 250;
}
case 3:
default: {
return 100;
}
}
}
}
private static class LanguageSetting extends PseudoEnumSetting<String> {
private final Map<String, String> mapping;
LanguageSetting() {
super("");
Map<String, String> mapping = new HashMap<>();
String[] values = K9.app.getResources().getStringArray(R.array.settings_language_values);
for (String value : values) {
if (value.length() == 0) {
mapping.put("", "default");
} else {
mapping.put(value, value);
}
}
this.mapping = Collections.unmodifiableMap(mapping);
}
@Override
protected Map<String, String> getMapping() {
return mapping;
}
@Override
public String fromString(String value) throws InvalidSettingValueException {
if (mapping.containsKey(value)) {
return value;
}
throw new InvalidSettingValueException();
}
}
static class ThemeSetting extends SettingsDescription<K9.Theme> {
private static final String THEME_LIGHT = "light";
private static final String THEME_DARK = "dark";
ThemeSetting(K9.Theme defaultValue) {
super(defaultValue);
}
@Override
public K9.Theme fromString(String value) throws InvalidSettingValueException {
try {
Integer theme = Integer.parseInt(value);
if (theme == K9.Theme.LIGHT.ordinal() ||
// We used to store the resource ID of the theme in the preference storage,
// but don't use the database upgrade mechanism to update the values. So
// we have to deal with the old format here.
theme == android.R.style.Theme_Light) {
return K9.Theme.LIGHT;
} else if (theme == K9.Theme.DARK.ordinal() || theme == android.R.style.Theme) {
return K9.Theme.DARK;
}
} catch (NumberFormatException e) { /* do nothing */ }
throw new InvalidSettingValueException();
}
@Override
public K9.Theme fromPrettyString(String value) throws InvalidSettingValueException {
if (THEME_LIGHT.equals(value)) {
return K9.Theme.LIGHT;
} else if (THEME_DARK.equals(value)) {
return K9.Theme.DARK;
}
throw new InvalidSettingValueException();
}
@Override
public String toPrettyString(K9.Theme value) {
switch (value) {
case DARK: {
return THEME_DARK;
}
default: {
return THEME_LIGHT;
}
}
}
@Override
public String toString(K9.Theme value) {
return Integer.toString(value.ordinal());
}
}
private static class SubThemeSetting extends ThemeSetting {
private static final String THEME_USE_GLOBAL = "use_global";
SubThemeSetting(Theme defaultValue) {
super(defaultValue);
}
@Override
public K9.Theme fromString(String value) throws InvalidSettingValueException {
try {
Integer theme = Integer.parseInt(value);
if (theme == K9.Theme.USE_GLOBAL.ordinal()) {
return K9.Theme.USE_GLOBAL;
}
return super.fromString(value);
} catch (NumberFormatException e) {
throw new InvalidSettingValueException();
}
}
@Override
public K9.Theme fromPrettyString(String value) throws InvalidSettingValueException {
if (THEME_USE_GLOBAL.equals(value)) {
return K9.Theme.USE_GLOBAL;
}
return super.fromPrettyString(value);
}
@Override
public String toPrettyString(K9.Theme value) {
if (value == K9.Theme.USE_GLOBAL) {
return THEME_USE_GLOBAL;
}
return super.toPrettyString(value);
}
}
private static class TimeSetting extends SettingsDescription<String> {
TimeSetting(String defaultValue) {
super(defaultValue);
}
@Override
public String fromString(String value) throws InvalidSettingValueException {
if (!value.matches(TimePickerPreference.VALIDATION_EXPRESSION)) {
throw new InvalidSettingValueException();
}
return value;
}
}
private static class DirectorySetting extends SettingsDescription<String> {
DirectorySetting(File defaultPath) {
super(defaultPath.toString());
}
@Override
public String fromString(String value) throws InvalidSettingValueException {
try {
if (new File(value).isDirectory()) {
return value;
}
} catch (Exception e) { /* do nothing */ }
throw new InvalidSettingValueException();
}
}
}
| 1 | 16,677 | Please also increment `Settings.VERSION` and update `AccountSettings`. | k9mail-k-9 | java |
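The review comment on this entry asks for two follow-ups that the removal diff itself does not show: bumping Settings.VERSION and carrying the removed settings over to AccountSettings. Below is a minimal Java sketch of what that could look like, reusing the retire-with-null pattern this file already applies to keyguardPrivacy and fontSizeMessageViewContent; the version number 50 and the exact AccountSettings entries are illustrative assumptions, not the actual follow-up commit.

// GlobalSettings static initializer: keep the entries and mark them as removed
// at the (assumed) new settings version 50 instead of deleting them outright.
s.put("openPgpProvider", Settings.versions(
        new V(46, new StringSetting(K9.NO_OPENPGP_PROVIDER)),
        new V(50, null)
));
s.put("openPgpSupportSignOnly", Settings.versions(
        new V(47, new BooleanSetting(false)),
        new V(50, null)
));
// AccountSettings static initializer (assumed analogous structure): register the
// same settings per account, starting at the same assumed version.
s.put("openPgpProvider", Settings.versions(
        new V(50, new StringSetting(K9.NO_OPENPGP_PROVIDER))
));
s.put("openPgpSupportSignOnly", Settings.versions(
        new V(50, new BooleanSetting(false))
));

Settings.VERSION would then be raised to the same number, in line with the note at the top of the static initializer about incrementing it whenever the settings schema changes.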
@@ -18,6 +18,12 @@ import (
"time"
)
+var builddate string // set by the Makefile
+
+func versionString() string {
+ return "Weave of " + builddate
+}
+
func ensureInterface(ifaceName string, wait bool) (iface *net.Interface, err error) {
iface, err = findInterface(ifaceName)
if err == nil || !wait { | 1 | package main
import (
"code.google.com/p/gopacket/layers"
"crypto/sha256"
"flag"
"fmt"
"github.com/davecheney/profile"
"github.com/zettio/weave"
"io"
"log"
"net"
"net/http"
"os"
"os/signal"
"runtime"
"syscall"
"time"
)
func ensureInterface(ifaceName string, wait bool) (iface *net.Interface, err error) {
iface, err = findInterface(ifaceName)
if err == nil || !wait {
return
}
log.Println("Waiting for interface", ifaceName, "to come up")
for err != nil {
time.Sleep(1 * time.Second)
iface, err = findInterface(ifaceName)
}
log.Println("Interface", ifaceName, "is up")
return
}
func findInterface(ifaceName string) (iface *net.Interface, err error) {
iface, err = net.InterfaceByName(ifaceName)
if err != nil {
return iface, fmt.Errorf("Unable to find interface %s", ifaceName)
}
if 0 == (net.FlagUp & iface.Flags) {
return iface, fmt.Errorf("Interface %s is not up", ifaceName)
}
return
}
func main() {
log.SetPrefix(weave.Protocol + " ")
log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
procs := runtime.NumCPU()
// packet sniffing can block an OS thread, so we need one thread
// for that plus at least one more.
if procs < 2 {
procs = 2
}
runtime.GOMAXPROCS(procs)
var (
ifaceName string
routerName string
password string
wait bool
debug bool
prof string
peers []string
connLimit int
bufSz int
)
flag.StringVar(&ifaceName, "iface", "", "name of interface to read from")
flag.StringVar(&routerName, "name", "", "name of router (defaults to MAC)")
flag.StringVar(&password, "password", "", "network password")
flag.BoolVar(&wait, "wait", false, "wait for interface to be created and come up")
flag.BoolVar(&debug, "debug", false, "enable debug logging")
flag.StringVar(&prof, "profile", "", "enable profiling and write profiles to given path")
flag.IntVar(&connLimit, "connlimit", 10, "connection limit (defaults to 10, set to 0 for unlimited)")
flag.IntVar(&bufSz, "bufsz", 8, "capture buffer size in MB (defaults to 8MB)")
flag.Parse()
peers = flag.Args()
if ifaceName == "" {
fmt.Println("Missing required parameter 'iface'")
os.Exit(1)
}
iface, err := ensureInterface(ifaceName, wait)
if err != nil {
log.Fatal(err)
}
if connLimit < 0 {
connLimit = 0
}
if routerName == "" {
routerName = iface.HardwareAddr.String()
}
ourName, err := weave.PeerNameFromUserInput(routerName)
if err != nil {
log.Fatal(err)
}
var logFrame func(string, []byte, *layers.Ethernet)
if debug {
logFrame = func(prefix string, frame []byte, eth *layers.Ethernet) {
h := fmt.Sprintf("%x", sha256.Sum256(frame))
if eth == nil {
log.Println(prefix, len(frame), "bytes (", h, ")")
} else {
log.Println(prefix, len(frame), "bytes (", h, "):", eth.SrcMAC, "->", eth.DstMAC)
}
}
} else {
logFrame = func(prefix string, frame []byte, eth *layers.Ethernet) {}
}
if prof != "" {
p := *profile.CPUProfile
p.ProfilePath = prof
defer profile.Start(&p).Stop()
}
router := weave.NewRouter(iface, ourName, []byte(password), connLimit, bufSz*1024*1024, logFrame)
router.Start()
for _, peer := range peers {
targetPeer := peer
go func() {
if err := router.Ourself.CreateConnection(targetPeer, weave.UnknownPeerName); err != nil {
log.Println(err)
}
}()
}
go handleHttp(router)
handleSignals(router)
}
func handleHttp(router *weave.Router) {
http.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, router.Status())
})
address := fmt.Sprintf(":%d", weave.StatusPort)
err := http.ListenAndServe(address, nil)
if err != nil {
log.Fatal("Unable to create http listener: ", err)
}
}
func handleSignals(router *weave.Router) {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGQUIT, syscall.SIGUSR1)
buf := make([]byte, 1<<20)
for {
sig := <-sigs
switch sig {
case syscall.SIGQUIT:
runtime.Stack(buf, true)
log.Printf("=== received SIGQUIT ===\n*** goroutine dump...\n%s\n*** end\n", buf)
case syscall.SIGUSR1:
log.Printf("=== received SIGUSR1 ===\n*** status...\n%s\n*** end\n", router.Status())
}
}
}
| 1 | 7,168 | I think this should just say "build <date>". And when builddate wasn't set, e.g. somebody built weave with 'go build' or some other way, this should return "unknown". | weaveworks-weave | go |
@@ -142,7 +142,8 @@ public class BaseRewriteManifestsSparkAction
@Override
public RewriteManifests.Result execute() {
- JobGroupInfo info = newJobGroupInfo("REWRITE-MANIFESTS", "REWRITE-MANIFESTS");
+ JobGroupInfo info = newJobGroupInfo("REWRITE-MANIFESTS",
+ String.format("Rewriting manifests(staging location=%s) of %s", stagingLocation, table.name()));
return withJobGroupInfo(info, this::doExecute);
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.actions;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.ManifestFile;
import org.apache.iceberg.ManifestFiles;
import org.apache.iceberg.ManifestWriter;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.actions.BaseRewriteManifestsActionResult;
import org.apache.iceberg.actions.RewriteManifests;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.spark.JobGroupInfo;
import org.apache.iceberg.spark.SparkDataFile;
import org.apache.iceberg.spark.SparkUtil;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.iceberg.util.Tasks;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.api.java.function.MapPartitionsFunction;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.internal.SQLConf;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.iceberg.MetadataTableType.ENTRIES;
/**
* An action that rewrites manifests in a distributed manner and co-locates metadata for partitions.
* <p>
* By default, this action rewrites all manifests for the current partition spec and writes the result
* to the metadata folder. The behavior can be modified by passing a custom predicate to {@link #rewriteIf(Predicate)}
* and a custom spec id to {@link #specId(int)}. In addition, there is a way to configure a custom location
* for new manifests via {@link #stagingLocation}.
*/
public class BaseRewriteManifestsSparkAction
extends BaseSnapshotUpdateSparkAction<RewriteManifests, RewriteManifests.Result>
implements RewriteManifests {
private static final Logger LOG = LoggerFactory.getLogger(BaseRewriteManifestsSparkAction.class);
private static final String USE_CACHING = "use-caching";
private static final boolean USE_CACHING_DEFAULT = true;
private final Encoder<ManifestFile> manifestEncoder;
private final Table table;
private final int formatVersion;
private final FileIO fileIO;
private final long targetManifestSizeBytes;
private PartitionSpec spec = null;
private Predicate<ManifestFile> predicate = manifest -> true;
private String stagingLocation = null;
public BaseRewriteManifestsSparkAction(SparkSession spark, Table table) {
super(spark);
this.manifestEncoder = Encoders.javaSerialization(ManifestFile.class);
this.table = table;
this.spec = table.spec();
this.targetManifestSizeBytes = PropertyUtil.propertyAsLong(
table.properties(),
TableProperties.MANIFEST_TARGET_SIZE_BYTES,
TableProperties.MANIFEST_TARGET_SIZE_BYTES_DEFAULT);
this.fileIO = SparkUtil.serializableFileIO(table);
// default the staging location to the metadata location
TableOperations ops = ((HasTableOperations) table).operations();
Path metadataFilePath = new Path(ops.metadataFileLocation("file"));
this.stagingLocation = metadataFilePath.getParent().toString();
// use the current table format version for new manifests
this.formatVersion = ops.current().formatVersion();
}
@Override
protected RewriteManifests self() {
return this;
}
@Override
public RewriteManifests specId(int specId) {
Preconditions.checkArgument(table.specs().containsKey(specId), "Invalid spec id %d", specId);
this.spec = table.specs().get(specId);
return this;
}
@Override
public RewriteManifests rewriteIf(Predicate<ManifestFile> newPredicate) {
this.predicate = newPredicate;
return this;
}
@Override
public RewriteManifests stagingLocation(String newStagingLocation) {
this.stagingLocation = newStagingLocation;
return this;
}
@Override
public RewriteManifests.Result execute() {
JobGroupInfo info = newJobGroupInfo("REWRITE-MANIFESTS", "REWRITE-MANIFESTS");
return withJobGroupInfo(info, this::doExecute);
}
private RewriteManifests.Result doExecute() {
List<ManifestFile> matchingManifests = findMatchingManifests();
if (matchingManifests.isEmpty()) {
return BaseRewriteManifestsActionResult.empty();
}
long totalSizeBytes = 0L;
int numEntries = 0;
for (ManifestFile manifest : matchingManifests) {
ValidationException.check(hasFileCounts(manifest), "No file counts in manifest: %s", manifest.path());
totalSizeBytes += manifest.length();
numEntries += manifest.addedFilesCount() + manifest.existingFilesCount() + manifest.deletedFilesCount();
}
int targetNumManifests = targetNumManifests(totalSizeBytes);
int targetNumManifestEntries = targetNumManifestEntries(numEntries, targetNumManifests);
Dataset<Row> manifestEntryDF = buildManifestEntryDF(matchingManifests);
List<ManifestFile> newManifests;
if (spec.fields().size() < 1) {
newManifests = writeManifestsForUnpartitionedTable(manifestEntryDF, targetNumManifests);
} else {
newManifests = writeManifestsForPartitionedTable(manifestEntryDF, targetNumManifests, targetNumManifestEntries);
}
replaceManifests(matchingManifests, newManifests);
return new BaseRewriteManifestsActionResult(matchingManifests, newManifests);
}
private Dataset<Row> buildManifestEntryDF(List<ManifestFile> manifests) {
Dataset<Row> manifestDF = spark()
.createDataset(Lists.transform(manifests, ManifestFile::path), Encoders.STRING())
.toDF("manifest");
Dataset<Row> manifestEntryDF = loadMetadataTable(table, ENTRIES)
.filter("status < 2") // select only live entries
.selectExpr("input_file_name() as manifest", "snapshot_id", "sequence_number", "data_file");
Column joinCond = manifestDF.col("manifest").equalTo(manifestEntryDF.col("manifest"));
return manifestEntryDF
.join(manifestDF, joinCond, "left_semi")
.select("snapshot_id", "sequence_number", "data_file");
}
private List<ManifestFile> writeManifestsForUnpartitionedTable(Dataset<Row> manifestEntryDF, int numManifests) {
Broadcast<FileIO> io = sparkContext().broadcast(fileIO);
StructType sparkType = (StructType) manifestEntryDF.schema().apply("data_file").dataType();
// we rely only on the target number of manifests for unpartitioned tables
// as we should not worry about having too much metadata per partition
long maxNumManifestEntries = Long.MAX_VALUE;
return manifestEntryDF
.repartition(numManifests)
.mapPartitions(
toManifests(io, maxNumManifestEntries, stagingLocation, formatVersion, spec, sparkType),
manifestEncoder
)
.collectAsList();
}
private List<ManifestFile> writeManifestsForPartitionedTable(
Dataset<Row> manifestEntryDF, int numManifests,
int targetNumManifestEntries) {
Broadcast<FileIO> io = sparkContext().broadcast(fileIO);
StructType sparkType = (StructType) manifestEntryDF.schema().apply("data_file").dataType();
// we allow the actual size of manifests to be 10% higher if the estimation is not precise enough
long maxNumManifestEntries = (long) (1.1 * targetNumManifestEntries);
return withReusableDS(manifestEntryDF, df -> {
Column partitionColumn = df.col("data_file.partition");
return df.repartitionByRange(numManifests, partitionColumn)
.sortWithinPartitions(partitionColumn)
.mapPartitions(
toManifests(io, maxNumManifestEntries, stagingLocation, formatVersion, spec, sparkType),
manifestEncoder
)
.collectAsList();
});
}
private <T, U> U withReusableDS(Dataset<T> ds, Function<Dataset<T>, U> func) {
Dataset<T> reusableDS;
boolean useCaching = PropertyUtil.propertyAsBoolean(options(), USE_CACHING, USE_CACHING_DEFAULT);
if (useCaching) {
reusableDS = ds.cache();
} else {
int parallelism = SQLConf.get().numShufflePartitions();
reusableDS = ds.repartition(parallelism).map((MapFunction<T, T>) value -> value, ds.exprEnc());
}
try {
return func.apply(reusableDS);
} finally {
if (useCaching) {
reusableDS.unpersist(false);
}
}
}
private List<ManifestFile> findMatchingManifests() {
Snapshot currentSnapshot = table.currentSnapshot();
if (currentSnapshot == null) {
return ImmutableList.of();
}
return currentSnapshot.dataManifests().stream()
.filter(manifest -> manifest.partitionSpecId() == spec.specId() && predicate.test(manifest))
.collect(Collectors.toList());
}
private int targetNumManifests(long totalSizeBytes) {
return (int) ((totalSizeBytes + targetManifestSizeBytes - 1) / targetManifestSizeBytes);
}
private int targetNumManifestEntries(int numEntries, int numManifests) {
return (numEntries + numManifests - 1) / numManifests;
}
private boolean hasFileCounts(ManifestFile manifest) {
return manifest.addedFilesCount() != null &&
manifest.existingFilesCount() != null &&
manifest.deletedFilesCount() != null;
}
private void replaceManifests(Iterable<ManifestFile> deletedManifests, Iterable<ManifestFile> addedManifests) {
try {
boolean snapshotIdInheritanceEnabled = PropertyUtil.propertyAsBoolean(
table.properties(),
TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED,
TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT);
org.apache.iceberg.RewriteManifests rewriteManifests = table.rewriteManifests();
deletedManifests.forEach(rewriteManifests::deleteManifest);
addedManifests.forEach(rewriteManifests::addManifest);
commit(rewriteManifests);
if (!snapshotIdInheritanceEnabled) {
// delete new manifests as they were rewritten before the commit
deleteFiles(Iterables.transform(addedManifests, ManifestFile::path));
}
} catch (Exception e) {
// delete all new manifests because the rewrite failed
deleteFiles(Iterables.transform(addedManifests, ManifestFile::path));
throw e;
}
}
private void deleteFiles(Iterable<String> locations) {
Tasks.foreach(locations)
.noRetry()
.suppressFailureWhenFinished()
.onFailure((location, exc) -> LOG.warn("Failed to delete: {}", location, exc))
.run(fileIO::deleteFile);
}
private static ManifestFile writeManifest(
List<Row> rows, int startIndex, int endIndex, Broadcast<FileIO> io,
String location, int format, PartitionSpec spec, StructType sparkType) throws IOException {
String manifestName = "optimized-m-" + UUID.randomUUID();
Path manifestPath = new Path(location, manifestName);
OutputFile outputFile = io.value().newOutputFile(FileFormat.AVRO.addExtension(manifestPath.toString()));
Types.StructType dataFileType = DataFile.getType(spec.partitionType());
SparkDataFile wrapper = new SparkDataFile(dataFileType, sparkType);
ManifestWriter<DataFile> writer = ManifestFiles.write(format, spec, outputFile, null);
try {
for (int index = startIndex; index < endIndex; index++) {
Row row = rows.get(index);
long snapshotId = row.getLong(0);
long sequenceNumber = row.getLong(1);
Row file = row.getStruct(2);
writer.existing(wrapper.wrap(file), snapshotId, sequenceNumber);
}
} finally {
writer.close();
}
return writer.toManifestFile();
}
private static MapPartitionsFunction<Row, ManifestFile> toManifests(
Broadcast<FileIO> io, long maxNumManifestEntries, String location,
int format, PartitionSpec spec, StructType sparkType) {
return rows -> {
List<Row> rowsAsList = Lists.newArrayList(rows);
if (rowsAsList.isEmpty()) {
return Collections.emptyIterator();
}
List<ManifestFile> manifests = Lists.newArrayList();
if (rowsAsList.size() <= maxNumManifestEntries) {
manifests.add(writeManifest(rowsAsList, 0, rowsAsList.size(), io, location, format, spec, sparkType));
} else {
int midIndex = rowsAsList.size() / 2;
manifests.add(writeManifest(rowsAsList, 0, midIndex, io, location, format, spec, sparkType));
manifests.add(writeManifest(rowsAsList, midIndex, rowsAsList.size(), io, location, format, spec, sparkType));
}
return manifests.iterator();
};
}
}
| 1 | 36,574 | Can we move this into a separate var and call it `desc` like below? | apache-iceberg | java |
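The reviewer's note refers to the job-group description added in the diff at the top of this entry. A minimal Java sketch of the suggested tidy-up follows: extract the formatted string into a local variable named desc before building the JobGroupInfo. Behavior is unchanged, and the description text is taken verbatim from the diff; whether the merged change kept that exact wording is not shown here.

// Hypothetical restatement of the reviewer's suggestion inside
// BaseRewriteManifestsSparkAction; all identifiers come from the file above.
@Override
public RewriteManifests.Result execute() {
  String desc = String.format("Rewriting manifests(staging location=%s) of %s", stagingLocation, table.name());
  JobGroupInfo info = newJobGroupInfo("REWRITE-MANIFESTS", desc);
  return withJobGroupInfo(info, this::doExecute);
}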
@@ -19,6 +19,14 @@ package io.servicecomb.demo.pojo.server;
import java.util.Arrays;
import java.util.List;
+import javax.ws.rs.core.MediaType;
+
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
+import org.springframework.web.bind.annotation.ResponseBody;
+
import io.servicecomb.demo.server.Test;
import io.servicecomb.demo.server.TestRequest;
import io.servicecomb.demo.server.User; | 1 | /*
* Copyright 2017 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.demo.pojo.server;
import java.util.Arrays;
import java.util.List;
import io.servicecomb.demo.server.Test;
import io.servicecomb.demo.server.TestRequest;
import io.servicecomb.demo.server.User;
import io.servicecomb.provider.pojo.RpcSchema;
import io.servicecomb.swagger.invocation.exception.InvocationException;
@RpcSchema(schemaId = "server")
public class TestImpl implements Test {
@Override
public String testStringArray(String[] arr) {
return String.format("arr is '%s'", Arrays.toString(arr));
}
@Override
public String getTestString(String code) {
return String.format("code is '%s'", String.valueOf(code));
}
@Override
public String postTestStatic(int code) {
return null;
}
private User doTest(int index, User user, List<User> users, byte[] data) {
if (user == null) {
user = new User();
}
user.setIndex(index);
int userCount = (users == null) ? 0 : users.size();
user.setName(user.getName() + ", users count:" + userCount);
return user;
}
@Override
public String testException(int code) {
String strCode = String.valueOf(code);
switch (code) {
case 200:
return strCode;
case 456:
throw new InvocationException(code, strCode, strCode + " error");
case 556:
throw new InvocationException(code, strCode, Arrays.asList(strCode + " error"));
case 557:
throw new InvocationException(code, strCode, Arrays.asList(Arrays.asList(strCode + " error")));
default:
break;
}
return "not expected";
}
@Override
public User splitParam(int index, User user) {
return doTest(index, user, null, null);
}
@Override
public User wrapParam(TestRequest request) {
if (request == null) {
return null;
}
return doTest(request.getIndex(), request.getUser(), request.getUsers(), request.getData());
}
@Override
public String addString(String[] strArr) {
String result = Arrays.toString(strArr);
System.out.println("addString: " + result);
return result;
}
}
| 1 | 7,260 | why RpcSchema need this annotations? | apache-servicecomb-java-chassis | java |
@@ -1673,7 +1673,9 @@ func (s *Server) debugSubscribers(sub *subscription, c *client, subject, reply s
}
// Cleanup the WC entry.
s.mu.Lock()
- delete(s.sys.replies, replySubj)
+ if s.sys != nil && s.sys.replies != nil {
+ delete(s.sys.replies, replySubj)
+ }
s.mu.Unlock()
// Send the response.
s.sendInternalAccountMsg(nil, reply, atomic.LoadInt32(&nsubs)) | 1 | // Copyright 2018-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nats-server/v2/server/pse"
)
const (
accLookupReqTokens = 6
accLookupReqSubj = "$SYS.REQ.ACCOUNT.%s.CLAIMS.LOOKUP"
accPackReqSubj = "$SYS.REQ.CLAIMS.PACK"
accListReqSubj = "$SYS.REQ.CLAIMS.LIST"
accClaimsReqSubj = "$SYS.REQ.CLAIMS.UPDATE"
accDeleteReqSubj = "$SYS.REQ.CLAIMS.DELETE"
connectEventSubj = "$SYS.ACCOUNT.%s.CONNECT"
disconnectEventSubj = "$SYS.ACCOUNT.%s.DISCONNECT"
accReqSubj = "$SYS.REQ.ACCOUNT.%s.%s"
// kept for backward compatibility when using http resolver
// this overlaps with the names for events but you'd have to have the operator private key in order to succeed.
accUpdateEventSubjOld = "$SYS.ACCOUNT.%s.CLAIMS.UPDATE"
accUpdateEventSubjNew = "$SYS.REQ.ACCOUNT.%s.CLAIMS.UPDATE"
connsRespSubj = "$SYS._INBOX_.%s"
accConnsEventSubjNew = "$SYS.ACCOUNT.%s.SERVER.CONNS"
accConnsEventSubjOld = "$SYS.SERVER.ACCOUNT.%s.CONNS" // kept for backward compatibility
shutdownEventSubj = "$SYS.SERVER.%s.SHUTDOWN"
authErrorEventSubj = "$SYS.SERVER.%s.CLIENT.AUTH.ERR"
serverStatsSubj = "$SYS.SERVER.%s.STATSZ"
serverDirectReqSubj = "$SYS.REQ.SERVER.%s.%s"
serverPingReqSubj = "$SYS.REQ.SERVER.PING.%s"
serverStatsPingReqSubj = "$SYS.REQ.SERVER.PING" // use $SYS.REQ.SERVER.PING.STATSZ instead
leafNodeConnectEventSubj = "$SYS.ACCOUNT.%s.LEAFNODE.CONNECT"
remoteLatencyEventSubj = "$SYS.LATENCY.M2.%s"
inboxRespSubj = "$SYS._INBOX.%s.%s"
// FIXME(dlc) - Should account scope, even with wc for now, but later on
// we can then shard as needed.
accNumSubsReqSubj = "$SYS.REQ.ACCOUNT.NSUBS"
// These are for exported debug services. These are local to this server only.
accSubsSubj = "$SYS.DEBUG.SUBSCRIBERS"
shutdownEventTokens = 4
serverSubjectIndex = 2
accUpdateTokensNew = 6
accUpdateTokensOld = 5
accUpdateAccIdxOld = 2
accReqTokens = 5
accReqAccIndex = 3
)
// FIXME(dlc) - make configurable.
var eventsHBInterval = 30 * time.Second
// Used to send and receive messages from inside the server.
type internal struct {
account *Account
client *client
seq uint64
sid int
servers map[string]*serverUpdate
sweeper *time.Timer
stmr *time.Timer
replies map[string]msgHandler
sendq chan *pubMsg
resetCh chan struct{}
wg sync.WaitGroup
orphMax time.Duration
chkOrph time.Duration
statsz time.Duration
shash string
inboxPre string
}
// ServerStatsMsg is sent periodically with stats updates.
type ServerStatsMsg struct {
Server ServerInfo `json:"server"`
Stats ServerStats `json:"statsz"`
}
// ConnectEventMsg is sent when a new connection is made that is part of an account.
type ConnectEventMsg struct {
TypedEvent
Server ServerInfo `json:"server"`
Client ClientInfo `json:"client"`
}
// ConnectEventMsgType is the schema type for ConnectEventMsg
const ConnectEventMsgType = "io.nats.server.advisory.v1.client_connect"
// DisconnectEventMsg is sent when a new connection previously defined from a
// ConnectEventMsg is closed.
type DisconnectEventMsg struct {
TypedEvent
Server ServerInfo `json:"server"`
Client ClientInfo `json:"client"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
Reason string `json:"reason"`
}
// DisconnectEventMsgType is the schema type for DisconnectEventMsg
const DisconnectEventMsgType = "io.nats.server.advisory.v1.client_disconnect"
// AccountNumConns is an event that will be sent from a server that is tracking
// a given account when the number of connections changes. It will also HB
// updates in the absence of any changes.
type AccountNumConns struct {
TypedEvent
Server ServerInfo `json:"server"`
Account string `json:"acc"`
Conns int `json:"conns"`
LeafNodes int `json:"leafnodes"`
TotalConns int `json:"total_conns"`
}
const AccountNumConnsMsgType = "io.nats.server.advisory.v1.account_connections"
// accNumConnsReq is sent when we are starting to track an account for the first
// time. We will request others send info to us about their local state.
type accNumConnsReq struct {
Server ServerInfo `json:"server"`
Account string `json:"acc"`
}
// ServerInfo identifies remote servers.
type ServerInfo struct {
Name string `json:"name"`
Host string `json:"host"`
ID string `json:"id"`
Cluster string `json:"cluster,omitempty"`
Version string `json:"ver"`
Seq uint64 `json:"seq"`
JetStream bool `json:"jetstream"`
Time time.Time `json:"time"`
}
// ClientInfo is detailed information about the client forming a connection.
type ClientInfo struct {
Start *time.Time `json:"start,omitempty"`
Host string `json:"host,omitempty"`
ID uint64 `json:"id,omitempty"`
Account string `json:"acc"`
User string `json:"user,omitempty"`
Name string `json:"name,omitempty"`
Lang string `json:"lang,omitempty"`
Version string `json:"ver,omitempty"`
RTT time.Duration `json:"rtt,omitempty"`
Server string `json:"server,omitempty"`
Stop *time.Time `json:"stop,omitempty"`
}
// ServerStats hold various statistics that we will periodically send out.
type ServerStats struct {
Start time.Time `json:"start"`
Mem int64 `json:"mem"`
Cores int `json:"cores"`
CPU float64 `json:"cpu"`
Connections int `json:"connections"`
TotalConnections uint64 `json:"total_connections"`
ActiveAccounts int `json:"active_accounts"`
NumSubs uint32 `json:"subscriptions"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
SlowConsumers int64 `json:"slow_consumers"`
Routes []*RouteStat `json:"routes,omitempty"`
Gateways []*GatewayStat `json:"gateways,omitempty"`
}
// RouteStat holds route statistics.
type RouteStat struct {
ID uint64 `json:"rid"`
Name string `json:"name,omitempty"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
Pending int `json:"pending"`
}
// GatewayStat holds gateway statistics.
type GatewayStat struct {
ID uint64 `json:"gwid"`
Name string `json:"name"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
NumInbound int `json:"inbound_connections"`
}
// DataStats reports how may msg and bytes. Applicable for both sent and received.
type DataStats struct {
Msgs int64 `json:"msgs"`
Bytes int64 `json:"bytes"`
}
// Used for internally queueing up messages that the server wants to send.
type pubMsg struct {
acc *Account
sub string
rply string
si *ServerInfo
msg interface{}
last bool
}
// Used to track server updates.
type serverUpdate struct {
seq uint64
ltime time.Time
}
// TypedEvent is an event or advisory sent by the server that has NATS type hints,
// typically used for events that might be consumed by 3rd party event systems.
type TypedEvent struct {
Type string `json:"type"`
ID string `json:"id"`
Time time.Time `json:"timestamp"`
}
// internalSendLoop will be responsible for serializing all messages that
// a server wants to send.
func (s *Server) internalSendLoop(wg *sync.WaitGroup) {
defer wg.Done()
RESET:
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return
}
c := s.sys.client
resetCh := s.sys.resetCh
sysacc := s.sys.account
sendq := s.sys.sendq
id := s.info.ID
host := s.info.Host
servername := s.info.Name
seqp := &s.sys.seq
js := s.js != nil
cluster := s.info.Cluster
if s.gateway.enabled {
cluster = s.getGatewayName()
}
s.mu.Unlock()
// Warn when internal send queue is backed up past 75%
warnThresh := 3 * internalSendQLen / 4
warnFreq := time.Second
last := time.Now().Add(-warnFreq)
for s.eventsRunning() {
// Setup information for next message
if len(sendq) > warnThresh && time.Since(last) >= warnFreq {
s.Warnf("Internal system send queue > 75%%")
last = time.Now()
}
select {
case pm := <-sendq:
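// If the message carries a ServerInfo, stamp it with this server's identity
// and the next sequence number so receivers can order and attribute updates.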
if pm.si != nil {
pm.si.Name = servername
pm.si.Host = host
pm.si.Cluster = cluster
pm.si.ID = id
pm.si.Seq = atomic.AddUint64(seqp, 1)
pm.si.Version = VERSION
pm.si.Time = time.Now()
pm.si.JetStream = js
}
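// Serialize the payload: strings and byte slices are sent as-is, anything else is JSON encoded.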
var b []byte
if pm.msg != nil {
switch v := pm.msg.(type) {
case string:
b = []byte(v)
case []byte:
b = v
default:
b, _ = json.MarshalIndent(pm.msg, _EMPTY_, " ")
}
}
// Grab client lock.
c.mu.Lock()
// We can have an override for account here.
if pm.acc != nil {
c.acc = pm.acc
} else {
c.acc = sysacc
}
// Prep internal structures needed to send message.
c.pa.subject = []byte(pm.sub)
c.pa.size = len(b)
c.pa.szb = []byte(strconv.FormatInt(int64(len(b)), 10))
c.pa.reply = []byte(pm.rply)
trace := c.trace
c.mu.Unlock()
// Add in NL
b = append(b, _CRLF_...)
if trace {
c.traceInOp(fmt.Sprintf("PUB %s %s %d", c.pa.subject, c.pa.reply, c.pa.size), nil)
c.traceMsg(b)
}
// Process like a normal inbound msg.
c.processInboundClientMsg(b)
// See if we are doing graceful shutdown.
if !pm.last {
c.flushClients(0) // Never spend time in place.
} else {
// For the Shutdown event, we need to send in place otherwise
// there is a chance that the process will exit before the
// writeLoop has a chance to send it.
c.flushClients(time.Second)
return
}
case <-resetCh:
goto RESET
case <-s.quitCh:
return
}
}
}
// Will send a shutdown message.
func (s *Server) sendShutdownEvent() {
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return
}
subj := fmt.Sprintf(shutdownEventSubj, s.info.ID)
sendq := s.sys.sendq
// Stop any more messages from queueing up.
s.sys.sendq = nil
// Unhook all msgHandlers. Normal client cleanup will deal with subs, etc.
s.sys.replies = nil
s.mu.Unlock()
// Send to the internal queue and mark as last.
sendq <- &pubMsg{nil, subj, _EMPTY_, nil, nil, true}
}
// Used to send an internal message to an arbitrary account.
func (s *Server) sendInternalAccountMsg(a *Account, subject string, msg interface{}) error {
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return ErrNoSysAccount
}
sendq := s.sys.sendq
// Don't hold lock while placing on the channel.
s.mu.Unlock()
sendq <- &pubMsg{a, subject, "", nil, msg, false}
return nil
}
// This will queue up a message to be sent.
// Lock should not be held.
func (s *Server) sendInternalMsgLocked(sub, rply string, si *ServerInfo, msg interface{}) {
s.mu.Lock()
s.sendInternalMsg(sub, rply, si, msg)
s.mu.Unlock()
}
// This will queue up a message to be sent.
// Assumes lock is held on entry.
func (s *Server) sendInternalMsg(sub, rply string, si *ServerInfo, msg interface{}) {
if s.sys == nil || s.sys.sendq == nil {
return
}
sendq := s.sys.sendq
// Don't hold lock while placing on the channel.
s.mu.Unlock()
sendq <- &pubMsg{nil, sub, rply, si, msg, false}
s.mu.Lock()
}
// Locked version of checking if events system running. Also checks server.
func (s *Server) eventsRunning() bool {
s.mu.Lock()
er := s.running && s.eventsEnabled()
s.mu.Unlock()
return er
}
// EventsEnabled will report if the server has internal events enabled via
// a defined system account.
func (s *Server) EventsEnabled() bool {
s.mu.Lock()
ee := s.eventsEnabled()
s.mu.Unlock()
return ee
}
// eventsEnabled will report if events are enabled.
// Lock should be held.
func (s *Server) eventsEnabled() bool {
return s.sys != nil && s.sys.client != nil && s.sys.account != nil
}
// TrackedRemoteServers returns how many remote servers we are tracking
// from a system events perspective.
func (s *Server) TrackedRemoteServers() int {
s.mu.Lock()
if !s.running || !s.eventsEnabled() {
s.mu.Unlock()
return -1
}
ns := len(s.sys.servers)
s.mu.Unlock()
return ns
}
// Check for orphan servers that may have gone away without notification.
// This should be called via wrapChk() to set up common locking.
func (s *Server) checkRemoteServers() {
now := time.Now()
for sid, su := range s.sys.servers {
if now.Sub(su.ltime) > s.sys.orphMax {
s.Debugf("Detected orphan remote server: %q", sid)
// Simulate it going away.
s.processRemoteServerShutdown(sid)
delete(s.sys.servers, sid)
}
}
if s.sys.sweeper != nil {
s.sys.sweeper.Reset(s.sys.chkOrph)
}
}
// Grab RSS and PCPU
func updateServerUsage(v *ServerStats) {
var rss, vss int64
var pcpu float64
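// ProcUsage reports percent CPU plus resident and virtual memory sizes for this process.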
pse.ProcUsage(&pcpu, &rss, &vss)
v.Mem = rss
v.CPU = pcpu
v.Cores = numCores
}
// Generate a route stat for our statz update.
func routeStat(r *client) *RouteStat {
if r == nil {
return nil
}
r.mu.Lock()
rs := &RouteStat{
ID: r.cid,
Sent: DataStats{
Msgs: atomic.LoadInt64(&r.outMsgs),
Bytes: atomic.LoadInt64(&r.outBytes),
},
Received: DataStats{
Msgs: atomic.LoadInt64(&r.inMsgs),
Bytes: atomic.LoadInt64(&r.inBytes),
},
Pending: int(r.out.pb),
}
if r.route != nil {
rs.Name = r.route.remoteName
}
r.mu.Unlock()
return rs
}
// Actual send method for statz updates.
// Lock should be held.
func (s *Server) sendStatsz(subj string) {
m := ServerStatsMsg{}
updateServerUsage(&m.Stats)
m.Stats.Start = s.start
m.Stats.Connections = len(s.clients)
m.Stats.TotalConnections = s.totalClients
m.Stats.ActiveAccounts = int(atomic.LoadInt32(&s.activeAccounts))
m.Stats.Received.Msgs = atomic.LoadInt64(&s.inMsgs)
m.Stats.Received.Bytes = atomic.LoadInt64(&s.inBytes)
m.Stats.Sent.Msgs = atomic.LoadInt64(&s.outMsgs)
m.Stats.Sent.Bytes = atomic.LoadInt64(&s.outBytes)
m.Stats.SlowConsumers = atomic.LoadInt64(&s.slowConsumers)
m.Stats.NumSubs = s.numSubscriptions()
for _, r := range s.routes {
m.Stats.Routes = append(m.Stats.Routes, routeStat(r))
}
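// When gateways are enabled, report outbound counters per gateway and aggregate
// inbound traffic across all inbound connections from the same remote gateway.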
if s.gateway.enabled {
gw := s.gateway
gw.RLock()
for name, c := range gw.out {
gs := &GatewayStat{Name: name}
c.mu.Lock()
gs.ID = c.cid
gs.Sent = DataStats{
Msgs: atomic.LoadInt64(&c.outMsgs),
Bytes: atomic.LoadInt64(&c.outBytes),
}
c.mu.Unlock()
// Gather matching inbound connections
gs.Received = DataStats{}
for _, c := range gw.in {
c.mu.Lock()
if c.gw.name == name {
gs.Received.Msgs += atomic.LoadInt64(&c.inMsgs)
gs.Received.Bytes += atomic.LoadInt64(&c.inBytes)
gs.NumInbound++
}
c.mu.Unlock()
}
m.Stats.Gateways = append(m.Stats.Gateways, gs)
}
gw.RUnlock()
}
s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
}
// Send out our statz update.
// This should be called via wrapChk() to set up common locking.
func (s *Server) heartbeatStatsz() {
if s.sys.stmr != nil {
s.sys.stmr.Reset(s.sys.statsz)
}
s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID))
}
// This should be called via wrapChk() to set up common locking.
func (s *Server) startStatszTimer() {
s.sys.stmr = time.AfterFunc(s.sys.statsz, s.wrapChk(s.heartbeatStatsz))
}
// Start a ticker that will fire periodically and check for orphaned servers.
// This should be called via wrapChk() to set up common locking.
func (s *Server) startRemoteServerSweepTimer() {
s.sys.sweeper = time.AfterFunc(s.sys.chkOrph, s.wrapChk(s.checkRemoteServers))
}
// Length of our system hash used for server targeted messages.
const sysHashLen = 8
// Computes a hash of 8 characters for the name.
func getHash(name string) []byte {
return getHashSize(name, sysHashLen)
}
// This will setup our system wide tracking subs.
// For now we will setup one wildcard subscription to
// monitor all accounts for changes in number of connections.
// We can make this on a per account tracking basis if needed.
// Tradeoff is subscription and interest graph events vs connect and
// disconnect events, etc.
func (s *Server) initEventTracking() {
if !s.EventsEnabled() {
return
}
// Create a system hash which we use for other servers to target us specifically.
s.sys.shash = string(getHash(s.info.Name))
// This will be for all inbox responses.
subject := fmt.Sprintf(inboxRespSubj, s.sys.shash, "*")
if _, err := s.sysSubscribe(subject, s.inboxReply); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
s.sys.inboxPre = subject
// This is for remote updates for connection accounting.
subject = fmt.Sprintf(accConnsEventSubjOld, "*")
if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
s.Errorf("Error setting up internal tracking for %s: %v", subject, err)
}
// This will be for responses for account info that we send out.
subject = fmt.Sprintf(connsRespSubj, s.info.ID)
if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for broad requests to respond with number of subscriptions for a given subject.
if _, err := s.sysSubscribe(accNumSubsReqSubj, s.nsubsRequest); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for all server shutdowns.
subject = fmt.Sprintf(shutdownEventSubj, "*")
if _, err := s.sysSubscribe(subject, s.remoteServerShutdown); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for account claims updates.
subscribeToUpdate := true
if s.accResolver != nil {
subscribeToUpdate = !s.accResolver.IsTrackingUpdate()
}
if subscribeToUpdate {
for _, sub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} {
if _, err := s.sysSubscribe(fmt.Sprintf(sub, "*"), s.accountClaimUpdate); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
}
}
// Listen for ping messages that will be sent to all servers for statsz.
// This subscription is kept for backwards compatibility. It was replaced by the ...PING.STATZ subject registered below.
if _, err := s.sysSubscribe(serverStatsPingReqSubj, s.statszReq); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
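// Per-server monitoring endpoints. Each handler below gets registered on both a
// server-specific direct subject and a broadcast ping subject (see the loop that follows).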
monSrvc := map[string]msgHandler{
"STATSZ": s.statszReq,
"VARZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &VarzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Varz(&optz.VarzOptions) })
},
"SUBSZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &SubszEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Subsz(&optz.SubszOptions) })
},
"CONNZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &ConnzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Connz(&optz.ConnzOptions) })
},
"ROUTEZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &RoutezEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Routez(&optz.RoutezOptions) })
},
"GATEWAYZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &GatewayzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Gatewayz(&optz.GatewayzOptions) })
},
"LEAFZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &LeafzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Leafz(&optz.LeafzOptions) })
},
"ACCOUNTZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &AccountzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Accountz(&optz.AccountzOptions) })
},
}
for name, req := range monSrvc {
subject = fmt.Sprintf(serverDirectReqSubj, s.info.ID, name)
if _, err := s.sysSubscribe(subject, req); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
subject = fmt.Sprintf(serverPingReqSubj, name)
if _, err := s.sysSubscribe(subject, req); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
}
extractAccount := func(subject string) (string, error) {
if tk := strings.Split(subject, tsep); len(tk) != accReqTokens {
return "", fmt.Errorf("subject %q is malformed", subject)
} else {
return tk[accReqAccIndex], nil
}
}
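// Account-scoped monitoring endpoints; the target account is parsed from the request subject.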
monAccSrvc := map[string]msgHandler{
"SUBSZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &SubszEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
optz.SubszOptions.Subscriptions = true
optz.SubszOptions.Account = acc
return s.Subsz(&optz.SubszOptions)
}
})
},
"CONNZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &ConnzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
optz.ConnzOptions.Account = acc
return s.Connz(&optz.ConnzOptions)
}
})
},
"LEAFZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &LeafzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
optz.LeafzOptions.Account = acc
return s.Leafz(&optz.LeafzOptions)
}
})
},
"INFO": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &AccInfoEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
return s.accountInfo(acc)
}
})
},
"CONNS": s.connsRequest,
}
for name, req := range monAccSrvc {
if _, err := s.sysSubscribe(fmt.Sprintf(accReqSubj, "*", name), req); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
}
// Listen for updates when leaf nodes connect for a given account. This will
// force any gateway connections to move to `modeInterestOnly`
subject = fmt.Sprintf(leafNodeConnectEventSubj, "*")
if _, err := s.sysSubscribe(subject, s.leafNodeConnected); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// For tracking remote latency measurements.
subject = fmt.Sprintf(remoteLatencyEventSubj, s.sys.shash)
if _, err := s.sysSubscribe(subject, s.remoteLatencyUpdate); err != nil {
s.Errorf("Error setting up internal latency tracking: %v", err)
}
// This is for simple debugging of number of subscribers that exist in the system.
if _, err := s.sysSubscribeInternal(accSubsSubj, s.debugSubscribers); err != nil {
s.Errorf("Error setting up internal debug service for subscribers: %v", err)
}
}
// add all exports a system account will need
func (s *Server) addSystemAccountExports(sacc *Account) {
if !s.EventsEnabled() {
return
}
if err := sacc.AddServiceExport(accSubsSubj, nil); err != nil {
s.Errorf("Error adding system service export for %q: %v", accSubsSubj, err)
}
}
// accountClaimUpdate will receive claim updates for accounts.
func (s *Server) accountClaimUpdate(sub *subscription, _ *client, subject, resp string, msg []byte) {
if !s.EventsEnabled() {
return
}
pubKey := ""
toks := strings.Split(subject, tsep)
if len(toks) == accUpdateTokensNew {
pubKey = toks[accReqAccIndex]
} else if len(toks) == accUpdateTokensOld {
pubKey = toks[accUpdateAccIdxOld]
} else {
s.Debugf("Received account claims update on bad subject %q", subject)
return
}
if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil {
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else if claim.Subject != pubKey {
err := errors.New("subject does not match jwt content")
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else if v, ok := s.accounts.Load(pubKey); !ok {
respondToUpdate(s, resp, pubKey, "jwt update skipped", nil)
} else if err := s.updateAccountWithClaimJWT(v.(*Account), string(msg)); err != nil {
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else {
respondToUpdate(s, resp, pubKey, "jwt updated", nil)
}
}
// processRemoteServerShutdown will update any affected accounts.
// Will update the remote count for clients.
// Lock is assumed to be held.
func (s *Server) processRemoteServerShutdown(sid string) {
s.accounts.Range(func(k, v interface{}) bool {
v.(*Account).removeRemoteServer(sid)
return true
})
}
// remoteServerShutdown is called when we get an event from another server shutting down.
func (s *Server) remoteServerShutdown(sub *subscription, _ *client, subject, reply string, msg []byte) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.eventsEnabled() {
return
}
toks := strings.Split(subject, tsep)
if len(toks) < shutdownEventTokens {
s.Debugf("Received remote server shutdown on bad subject %q", subject)
return
}
sid := toks[serverSubjectIndex]
su := s.sys.servers[sid]
if su != nil {
s.processRemoteServerShutdown(sid)
}
}
// updateRemoteServer is called when we have an update from a remote server.
// This allows us to track remote servers, respond to shutdown messages properly,
// make sure that messages are ordered, and allow us to prune dead servers.
// Lock should be held upon entry.
func (s *Server) updateRemoteServer(ms *ServerInfo) {
su := s.sys.servers[ms.ID]
if su == nil {
s.sys.servers[ms.ID] = &serverUpdate{ms.Seq, time.Now()}
s.processNewServer(ms)
} else {
// Should always be going up.
if ms.Seq <= su.seq {
s.Errorf("Received out of order remote server update from: %q", ms.ID)
return
}
su.seq = ms.Seq
su.ltime = time.Now()
}
}
// processNewServer will hold any logic we want to use when we discover a new server.
// Lock should be held upon entry.
func (s *Server) processNewServer(ms *ServerInfo) {
// Right now we only check if we have leafnode servers and if so send another
// connect update to make sure they switch this account to interest only mode.
s.ensureGWsInterestOnlyForLeafNodes()
}
// If GW is enabled on this server and there are any leaf node connections,
// this function will send a LeafNode connect system event to the super cluster
// to ensure that the GWs are in interest-only mode for this account.
// Lock should be held upon entry.
// TODO(dlc) - this will cause this account to be loaded on all servers. Need a better
// way with GW2.
func (s *Server) ensureGWsInterestOnlyForLeafNodes() {
if !s.gateway.enabled || len(s.leafs) == 0 {
return
}
sent := make(map[*Account]bool, len(s.leafs))
for _, c := range s.leafs {
if !sent[c.acc] {
s.sendLeafNodeConnectMsg(c.acc.Name)
sent[c.acc] = true
}
}
}
// shutdownEventing will clean up all eventing state.
func (s *Server) shutdownEventing() {
if !s.eventsRunning() {
return
}
s.mu.Lock()
clearTimer(&s.sys.sweeper)
clearTimer(&s.sys.stmr)
s.mu.Unlock()
// We will queue up a shutdown event and wait for the
// internal send loop to exit.
s.sendShutdownEvent()
s.sys.wg.Wait()
close(s.sys.resetCh)
s.mu.Lock()
defer s.mu.Unlock()
// Whip through all accounts.
s.accounts.Range(func(k, v interface{}) bool {
v.(*Account).clearEventing()
return true
})
// Turn everything off here.
s.sys = nil
}
// Request for our local connection count.
func (s *Server) connsRequest(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
tk := strings.Split(subject, tsep)
if len(tk) != accReqTokens {
s.sys.client.Errorf("Bad subject account connections request message")
return
}
a := tk[accReqAccIndex]
m := accNumConnsReq{Account: a}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err)
return
}
if m.Account != a {
s.sys.client.Errorf("Error unmarshalled account does not match subject")
return
}
// Here we really only want to look up the account if it's local. We do not want to fetch this
// account if we have no interest in it.
var acc *Account
if v, ok := s.accounts.Load(m.Account); ok {
acc = v.(*Account)
}
if acc == nil {
return
}
// We know this is a local connection.
if nlc := acc.NumLocalConnections(); nlc > 0 {
s.mu.Lock()
s.sendAccConnsUpdate(acc, reply)
s.mu.Unlock()
}
}
// leafNodeConnected is an event we will receive when a leaf node for a given account connects.
func (s *Server) leafNodeConnected(sub *subscription, _ *client, subject, reply string, msg []byte) {
m := accNumConnsReq{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err)
return
}
s.mu.Lock()
na := m.Account == "" || !s.eventsEnabled() || !s.gateway.enabled
s.mu.Unlock()
if na {
return
}
if acc, _ := s.lookupAccount(m.Account); acc != nil {
s.switchAccountToInterestMode(acc.Name)
}
}
// Common filter options for system requests STATSZ VARZ SUBSZ CONNZ ROUTEZ GATEWAYZ LEAFZ
type EventFilterOptions struct {
Name string `json:"server_name,omitempty"` // filter by server name
Cluster string `json:"cluster,omitempty"` // filter by cluster name
Host string `json:"host,omitempty"` // filter by host name
}
// StatszEventOptions are options passed to Statsz
type StatszEventOptions struct {
// No actual options yet
EventFilterOptions
}
// Options for account Info
type AccInfoEventOptions struct {
// No actual options yet
EventFilterOptions
}
// In the context of system events, ConnzEventOptions are options passed to Connz
type ConnzEventOptions struct {
ConnzOptions
EventFilterOptions
}
// In the context of system events, RoutezEventOptions are options passed to Routez
type RoutezEventOptions struct {
RoutezOptions
EventFilterOptions
}
// In the context of system events, SubzEventOptions are options passed to Subz
type SubszEventOptions struct {
SubszOptions
EventFilterOptions
}
// In the context of system events, VarzEventOptions are options passed to Varz
type VarzEventOptions struct {
VarzOptions
EventFilterOptions
}
// In the context of system events, GatewayzEventOptions are options passed to Gatewayz
type GatewayzEventOptions struct {
GatewayzOptions
EventFilterOptions
}
// In the context of system events, LeafzEventOptions are options passed to Leafz
type LeafzEventOptions struct {
LeafzOptions
EventFilterOptions
}
// In the context of system events, AccountzEventOptions are options passed to Accountz
type AccountzEventOptions struct {
AccountzOptions
EventFilterOptions
}
// returns true if the request does NOT apply to this server and can be ignored.
// DO NOT hold the server lock when calling this function.
func (s *Server) filterRequest(fOpts *EventFilterOptions) bool {
if fOpts.Name != "" && !strings.Contains(s.info.Name, fOpts.Name) {
return true
}
if fOpts.Host != "" && !strings.Contains(s.info.Host, fOpts.Host) {
return true
}
if fOpts.Cluster != "" {
s.mu.Lock()
cluster := s.info.Cluster
s.mu.Unlock()
if !strings.Contains(cluster, fOpts.Cluster) {
return true
}
}
return false
}
// statszReq is a request for us to respond with current statsz.
func (s *Server) statszReq(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.EventsEnabled() || reply == _EMPTY_ {
return
}
opts := StatszEventOptions{}
if len(msg) != 0 {
if err := json.Unmarshal(msg, &opts); err != nil {
server := &ServerInfo{}
response := map[string]interface{}{"server": server}
response["error"] = map[string]interface{}{
"code": http.StatusBadRequest,
"description": err.Error(),
}
s.sendInternalMsgLocked(reply, _EMPTY_, server, response)
return
} else if ignore := s.filterRequest(&opts.EventFilterOptions); ignore {
return
}
}
s.mu.Lock()
s.sendStatsz(reply)
s.mu.Unlock()
}
func (s *Server) zReq(reply string, msg []byte, fOpts *EventFilterOptions, optz interface{}, respf func() (interface{}, error)) {
if !s.EventsEnabled() || reply == _EMPTY_ {
return
}
server := &ServerInfo{}
response := map[string]interface{}{"server": server}
var err error
status := 0
if len(msg) != 0 {
if err = json.Unmarshal(msg, optz); err != nil {
status = http.StatusBadRequest // status is only included on error, so record how far execution got
} else if s.filterRequest(fOpts) {
return
}
}
if err == nil {
response["data"], err = respf()
status = http.StatusInternalServerError
}
if err != nil {
response["error"] = map[string]interface{}{
"code": status,
"description": err.Error(),
}
}
s.sendInternalMsgLocked(reply, _EMPTY_, server, response)
}
// remoteConnsUpdate gets called when we receive a remote update from another server.
func (s *Server) remoteConnsUpdate(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
m := AccountNumConns{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connection event message: %v", err)
return
}
// See if we have the account registered, if not drop it.
// Make sure this does not force us to load this account here.
var acc *Account
if v, ok := s.accounts.Load(m.Account); ok {
acc = v.(*Account)
}
// Silently ignore these if we do not have local interest in the account.
if acc == nil {
return
}
s.mu.Lock()
// check again here if we have been shutdown.
if !s.running || !s.eventsEnabled() {
s.mu.Unlock()
return
}
// Double check that this is not us, should never happen, so error if it does.
if m.Server.ID == s.info.ID {
s.sys.client.Errorf("Processing our own account connection event message: ignored")
s.mu.Unlock()
return
}
// If we are here we have interest in tracking this account. Update our accounting.
clients := acc.updateRemoteServer(&m)
s.updateRemoteServer(&m.Server)
s.mu.Unlock()
// Need to close clients outside of server lock
for _, c := range clients {
c.maxAccountConnExceeded()
}
}
// Setup tracking for this account. This allows us to track global account activity.
// Lock should be held on entry.
func (s *Server) enableAccountTracking(a *Account) {
if a == nil || !s.eventsEnabled() {
return
}
// TODO(ik): Generate payload although message may not be sent.
// May need to ensure we do so only if there is a known interest.
// This can get complicated with gateways.
subj := fmt.Sprintf(accReqSubj, a.Name, "CONNS")
reply := fmt.Sprintf(connsRespSubj, s.info.ID)
m := accNumConnsReq{Account: a.Name}
s.sendInternalMsg(subj, reply, &m.Server, &m)
}
// Event on leaf node connect.
// Lock should NOT be held on entry.
func (s *Server) sendLeafNodeConnect(a *Account) {
s.mu.Lock()
// If we are not in operator mode, or do not have any gateways defined, this should also be a no-op.
if a == nil || !s.eventsEnabled() || !s.gateway.enabled {
s.mu.Unlock()
return
}
s.sendLeafNodeConnectMsg(a.Name)
s.mu.Unlock()
s.switchAccountToInterestMode(a.Name)
}
// Send the leafnode connect message.
// Lock should be held.
func (s *Server) sendLeafNodeConnectMsg(accName string) {
subj := fmt.Sprintf(leafNodeConnectEventSubj, accName)
m := accNumConnsReq{Account: accName}
s.sendInternalMsg(subj, "", &m.Server, &m)
}
// sendAccConnsUpdate is called to send out our information on the
// account's local connections.
// Lock should be held on entry.
func (s *Server) sendAccConnsUpdate(a *Account, subj ...string) {
if !s.eventsEnabled() || a == nil {
return
}
sendQ := s.sys.sendq
if sendQ == nil {
return
}
// Build event with account name and number of local clients and leafnodes.
eid := s.nextEventID()
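// Swap locks: hold the account lock while building and queueing the event and release
// the server lock (held by callers on entry); it is re-acquired before returning.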
a.mu.Lock()
s.mu.Unlock()
localConns := a.numLocalConnections()
m := &AccountNumConns{
TypedEvent: TypedEvent{
Type: AccountNumConnsMsgType,
ID: eid,
Time: time.Now().UTC(),
},
Account: a.Name,
Conns: localConns,
LeafNodes: a.numLocalLeafNodes(),
TotalConns: localConns + a.numLocalLeafNodes(),
}
// Set timer to fire again unless we are at zero.
if localConns == 0 {
clearTimer(&a.ctmr)
} else {
// Check to see if we have an HB running and update.
if a.ctmr == nil {
a.ctmr = time.AfterFunc(eventsHBInterval, func() { s.accConnsUpdate(a) })
} else {
a.ctmr.Reset(eventsHBInterval)
}
}
for _, sub := range subj {
msg := &pubMsg{nil, sub, _EMPTY_, &m.Server, &m, false}
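// Try a non-blocking send first; if the queue is full, release the account lock
// while blocking on the send so other account operations are not stalled.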
select {
case sendQ <- msg:
default:
a.mu.Unlock()
sendQ <- msg
a.mu.Lock()
}
}
a.mu.Unlock()
s.mu.Lock()
}
// accConnsUpdate is called whenever there is a change to the account's
// number of active connections, or during a heartbeat.
func (s *Server) accConnsUpdate(a *Account) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.eventsEnabled() || a == nil {
return
}
s.sendAccConnsUpdate(a, fmt.Sprintf(accConnsEventSubjOld, a.Name), fmt.Sprintf(accConnsEventSubjNew, a.Name))
}
// server lock should be held
func (s *Server) nextEventID() string {
return s.eventIds.Next()
}
// accountConnectEvent will send an account client connect event if there is interest.
// This is a billing event.
func (s *Server) accountConnectEvent(c *client) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
gacc := s.gacc
eid := s.nextEventID()
s.mu.Unlock()
c.mu.Lock()
// Ignore global account activity
if c.acc == nil || c.acc == gacc {
c.mu.Unlock()
return
}
m := ConnectEventMsg{
TypedEvent: TypedEvent{
Type: ConnectEventMsgType,
ID: eid,
Time: time.Now().UTC(),
},
Client: ClientInfo{
Start: &c.start,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: c.getRawAuthUser(),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
},
}
c.mu.Unlock()
subj := fmt.Sprintf(connectEventSubj, c.acc.Name)
s.sendInternalMsgLocked(subj, _EMPTY_, &m.Server, &m)
}
// accountDisconnectEvent will send an account client disconnect event if there is interest.
// This is a billing event.
func (s *Server) accountDisconnectEvent(c *client, now time.Time, reason string) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
gacc := s.gacc
eid := s.nextEventID()
s.mu.Unlock()
c.mu.Lock()
// Ignore global account activity
if c.acc == nil || c.acc == gacc {
c.mu.Unlock()
return
}
m := DisconnectEventMsg{
TypedEvent: TypedEvent{
Type: DisconnectEventMsgType,
ID: eid,
Time: now.UTC(),
},
Client: ClientInfo{
Start: &c.start,
Stop: &now,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: c.getRawAuthUser(),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
RTT: c.getRTT(),
},
Sent: DataStats{
Msgs: atomic.LoadInt64(&c.inMsgs),
Bytes: atomic.LoadInt64(&c.inBytes),
},
Received: DataStats{
Msgs: c.outMsgs,
Bytes: c.outBytes,
},
Reason: reason,
}
accName := c.acc.Name
c.mu.Unlock()
subj := fmt.Sprintf(disconnectEventSubj, accName)
s.sendInternalMsgLocked(subj, _EMPTY_, &m.Server, &m)
}
func (s *Server) sendAuthErrorEvent(c *client) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
eid := s.nextEventID()
s.mu.Unlock()
now := time.Now()
c.mu.Lock()
m := DisconnectEventMsg{
TypedEvent: TypedEvent{
Type: DisconnectEventMsgType,
ID: eid,
Time: now.UTC(),
},
Client: ClientInfo{
Start: &c.start,
Stop: &now,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: c.getRawAuthUser(),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
RTT: c.getRTT(),
},
Sent: DataStats{
Msgs: c.inMsgs,
Bytes: c.inBytes,
},
Received: DataStats{
Msgs: c.outMsgs,
Bytes: c.outBytes,
},
Reason: AuthenticationViolation.String(),
}
c.mu.Unlock()
s.mu.Lock()
subj := fmt.Sprintf(authErrorEventSubj, s.info.ID)
s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
s.mu.Unlock()
}
// Internal message callback. If the msg is needed past the callback it is
// required to be copied.
type msgHandler func(sub *subscription, client *client, subject, reply string, msg []byte)
// Create an internal subscription. sysSubscribeQ for queue groups.
func (s *Server) sysSubscribe(subject string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, _EMPTY_, false, nil, cb)
}
// Create an internal subscription with queue
func (s *Server) sysSubscribeQ(subject, queue string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, queue, false, nil, cb)
}
// Create an internal subscription but do not forward interest.
func (s *Server) sysSubscribeInternal(subject string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, _EMPTY_, true, nil, cb)
}
func (s *Server) systemSubscribe(subject, queue string, internalOnly bool, c *client, cb msgHandler) (*subscription, error) {
if !s.eventsEnabled() {
return nil, ErrNoSysAccount
}
if cb == nil {
return nil, fmt.Errorf("undefined message handler")
}
s.mu.Lock()
if c == nil {
c = s.sys.client
}
trace := c.trace
s.sys.sid++
sid := strconv.Itoa(s.sys.sid)
s.mu.Unlock()
// Now create the subscription
if trace {
c.traceInOp("SUB", []byte(subject+" "+queue+" "+sid))
}
var q []byte
if queue != "" {
q = []byte(queue)
}
// Now create the subscription
return c.processSub([]byte(subject), q, []byte(sid), cb, internalOnly)
}
func (s *Server) sysUnsubscribe(sub *subscription) {
if sub == nil || !s.eventsEnabled() {
return
}
s.mu.Lock()
acc := s.sys.account
c := s.sys.client
s.mu.Unlock()
c.unsubscribe(acc, sub, true, true)
}
// This will generate the tracking subject for remote latency from the response subject.
func remoteLatencySubjectForResponse(subject []byte) string {
if !isTrackedReply(subject) {
return ""
}
toks := bytes.Split(subject, []byte(tsep))
// FIXME(dlc) - Sprintf may become a performance concern at some point.
return fmt.Sprintf(remoteLatencyEventSubj, toks[len(toks)-2])
}
// remoteLatencyUpdate is used to track remote latency measurements for tracking on exported services.
func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, subject, _ string, msg []byte) {
if !s.eventsRunning() {
return
}
rl := remoteLatency{}
if err := json.Unmarshal(msg, &rl); err != nil {
s.Errorf("Error unmarshalling remote latency measurement: %v", err)
return
}
// Now we need to look up the responseServiceImport associated with this measurement.
acc, err := s.LookupAccount(rl.Account)
if err != nil {
s.Warnf("Could not lookup account %q for latency measurement", rl.Account)
return
}
// Now get the request id / reply. We need to see if we have a GW prefix and if so strip that off.
reply := rl.ReqId
if gwPrefix, old := isGWRoutedSubjectAndIsOldPrefix([]byte(reply)); gwPrefix {
reply = string(getSubjectFromGWRoutedReply([]byte(reply), old))
}
acc.mu.RLock()
si := acc.exports.responses[reply]
if si == nil {
acc.mu.RUnlock()
return
}
m1 := si.m1
m2 := rl.M2
lsub := si.latency.subject
acc.mu.RUnlock()
// So we have not processed the response tracking measurement yet.
if m1 == nil {
si.acc.mu.Lock()
// Double check since could have slipped in.
m1 = si.m1
if m1 == nil {
// Store our value there for them to pick up.
si.m1 = &m2
}
si.acc.mu.Unlock()
if m1 == nil {
return
}
}
// Calculate the correct latencies given M1 and M2.
m1.merge(&m2)
// Clear the requesting client since we send the result here.
acc.mu.Lock()
si.rc = nil
acc.mu.Unlock()
// Make sure we remove the entry here.
acc.removeServiceImport(si.from)
// Send the metrics
s.sendInternalAccountMsg(acc, lsub, m1)
}
// This is used for all inbox replies so that we do not send supercluster wide interest
// updates for every request. Same trick used in modern NATS clients.
func (s *Server) inboxReply(sub *subscription, c *client, subject, reply string, msg []byte) {
s.mu.Lock()
if !s.eventsEnabled() || s.sys.replies == nil {
s.mu.Unlock()
return
}
cb, ok := s.sys.replies[subject]
s.mu.Unlock()
if ok && cb != nil {
cb(sub, c, subject, reply, msg)
}
}
// Copied from go client.
// We could use serviceReply here instead to save some code.
// I prefer these semantics for the moment, when tracing you know what this is.
const (
InboxPrefix = "$SYS._INBOX."
inboxPrefixLen = len(InboxPrefix)
respInboxPrefixLen = inboxPrefixLen + sysHashLen + 1
replySuffixLen = 8 // Gives us 62^8
)
// Creates an internal inbox used for replies that will be processed by the global wc handler.
func (s *Server) newRespInbox() string {
var b [respInboxPrefixLen + replySuffixLen]byte
pres := b[:respInboxPrefixLen]
copy(pres, s.sys.inboxPre)
rn := rand.Int63()
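// Encode the random value as replySuffixLen base-62 characters (62^8 possible suffixes).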
for i, l := respInboxPrefixLen, rn; i < len(b); i++ {
b[i] = digits[l%base]
l /= base
}
return string(b[:])
}
// accNumSubsReq is sent when we need to gather remote info on subs.
type accNumSubsReq struct {
Account string `json:"acc"`
Subject string `json:"subject"`
Queue []byte `json:"queue,omitempty"`
}
// Helper function to total up the matching subscriptions from a sublist result.
func totalSubs(rr *SublistResult, qg []byte) (nsubs int32) {
if rr == nil {
return
}
checkSub := func(sub *subscription) {
// TODO(dlc) - This could be smarter.
if qg != nil && !bytes.Equal(qg, sub.queue) {
return
}
if sub.client.kind == CLIENT || sub.client.isHubLeafNode() {
nsubs++
}
}
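// Plain subscriptions are only counted when no queue group was requested;
// queue subscriptions are always checked against the optional filter.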
if qg == nil {
for _, sub := range rr.psubs {
checkSub(sub)
}
}
for _, qsub := range rr.qsubs {
for _, sub := range qsub {
checkSub(sub)
}
}
return
}
// Allows users of large systems to debug active subscribers for a given subject.
// Payload should be the subject of interest.
func (s *Server) debugSubscribers(sub *subscription, c *client, subject, reply string, rmsg []byte) {
// Even though this is an internal only subscription, meaning interest was not forwarded, we could
// get one here from a GW in optimistic mode. Ignore for now.
// FIXME(dlc) - Should we send no interest here back to the GW?
if c.kind != CLIENT {
return
}
_, acc, _, msg, err := s.getRequestInfo(c, rmsg)
if err != nil {
return
}
// We could have a single subject or a subject and a queue group separated by whitespace.
args := strings.Split(strings.TrimSpace(string(msg)), " ")
if len(args) == 0 {
s.sendInternalAccountMsg(acc, reply, 0)
return
}
tsubj := args[0]
var qgroup []byte
if len(args) > 1 {
qgroup = []byte(args[1])
}
var nsubs int32
if subjectIsLiteral(tsubj) {
// We will look up subscribers locally first then determine if we need to solicit other servers.
rr := acc.sl.Match(tsubj)
nsubs = totalSubs(rr, qgroup)
} else {
// We have a wildcard, so this is a bit slower path.
var _subs [32]*subscription
subs := _subs[:0]
acc.sl.All(&subs)
for _, sub := range subs {
if subjectIsSubsetMatch(string(sub.subject), tsubj) {
if qgroup != nil && !bytes.Equal(qgroup, sub.queue) {
continue
}
if sub.client.kind == CLIENT || sub.client.isHubLeafNode() {
nsubs++
}
}
}
}
// We should have an idea of how many responses to expect from remote servers.
var expected = acc.expectedRemoteResponses()
// If we are only local, go ahead and return.
if expected == 0 {
s.sendInternalAccountMsg(nil, reply, nsubs)
return
}
// We need to solicit from others.
// To track status.
responses := int32(0)
done := make(chan (bool))
s.mu.Lock()
// Create direct reply inbox that we multiplex under the WC replies.
replySubj := s.newRespInbox()
// Store our handler.
s.sys.replies[replySubj] = func(sub *subscription, _ *client, subject, _ string, msg []byte) {
if n, err := strconv.Atoi(string(msg)); err == nil {
atomic.AddInt32(&nsubs, int32(n))
}
if atomic.AddInt32(&responses, 1) >= expected {
select {
case done <- true:
default:
}
}
}
// Send the request to the other servers.
request := &accNumSubsReq{
Account: acc.Name,
Subject: tsubj,
Queue: qgroup,
}
s.sendInternalMsg(accNumSubsReqSubj, replySubj, nil, request)
s.mu.Unlock()
// FIXME(dlc) - We should rate limit here instead of blind Go routine.
go func() {
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
// Cleanup the WC entry.
s.mu.Lock()
delete(s.sys.replies, replySubj)
s.mu.Unlock()
// Send the response.
s.sendInternalAccountMsg(nil, reply, atomic.LoadInt32(&nsubs))
}()
}
// Request for our local subscription count. This will come from a remote origin server
// that received the initial request.
func (s *Server) nsubsRequest(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
m := accNumSubsReq{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account nsubs request message: %v", err)
return
}
// Grab account.
acc, _ := s.lookupAccount(m.Account)
if acc == nil || acc.numLocalAndLeafConnections() == 0 {
return
}
// Look up the matching local subscribers.
var nsubs int32
if subjectIsLiteral(m.Subject) {
rr := acc.sl.Match(m.Subject)
nsubs = totalSubs(rr, m.Queue)
} else {
// We have a wildcard, so this is a bit slower path.
var _subs [32]*subscription
subs := _subs[:0]
acc.sl.All(&subs)
for _, sub := range subs {
if (sub.client.kind == CLIENT || sub.client.isHubLeafNode()) && subjectIsSubsetMatch(string(sub.subject), m.Subject) {
if m.Queue != nil && !bytes.Equal(m.Queue, sub.queue) {
continue
}
nsubs++
}
}
}
s.sendInternalMsgLocked(reply, _EMPTY_, nil, nsubs)
}
// Helper to grab account name for a client.
func accForClient(c *client) string {
if c.acc != nil {
return c.acc.Name
}
return "N/A"
}
// Helper to clear timers.
func clearTimer(tp **time.Timer) {
if t := *tp; t != nil {
t.Stop()
*tp = nil
}
}
// Helper function to wrap functions with common test
// to lock server and return if events not enabled.
func (s *Server) wrapChk(f func()) func() {
return func() {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
f()
s.mu.Unlock()
}
}
| 1 | 12,311 | Not sure it makes sense to send the reply if we did not delete from the map, but I guess it does not hurt.. | nats-io-nats-server | go |
@@ -187,10 +187,11 @@ DataMap.prototype.getSchema = function() {
* @returns {Number} Returns number of created rows.
*/
DataMap.prototype.createRow = function(index, amount, source) {
- var row,
- colCount = this.instance.countCols(),
- numberOfCreatedRows = 0,
- currentIndex;
+ let row = null;
+ let colCount = this.instance.countCols();
+ let numberOfCreatedRows = 0;
+ let currentIndex = null;
+ let continueProcess = null;
if (!amount) {
amount = 1; | 1 | import SheetClip from './../lib/SheetClip/SheetClip.js';
import {cellMethodLookupFactory} from './helpers/data';
import {columnFactory} from './helpers/setting';
import {createObjectPropListener, duckSchema, deepExtend, deepClone, isObject, deepObjectSize, hasOwnProperty} from './helpers/object';
import {extendArray, to2dArray} from './helpers/array';
import Interval from './utils/interval';
import {rangeEach} from './helpers/number';
import MultiMap from './multiMap';
import Hooks from './pluginHooks';
/**
* Utility class that gets and saves data from/to the data source using mapping of columns numbers to object property names
* @todo refactor arguments of methods getRange, getText to be numbers (not objects)
* @todo remove priv, GridSettings from object constructor
*
* @param {Object} instance Instance of Handsontable
* @param {*} priv
* @param {*} GridSettings Grid settings
* @util
* @class DataMap
*/
function DataMap(instance, priv, GridSettings) {
this.instance = instance;
this.priv = priv;
this.GridSettings = GridSettings;
this.dataSource = this.instance.getSettings().data;
this.cachedLength = null;
this.skipCache = false;
this.latestSourceRowsCount = 0;
if (this.dataSource && this.dataSource[0]) {
this.duckSchema = this.recursiveDuckSchema(this.dataSource[0]);
} else {
this.duckSchema = {};
}
this.createMap();
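// Periodically (~15 fps) invalidate the cached source length so row counting stays cheap without going stale.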
this.interval = Interval.create(() => this.clearLengthCache(), '15fps');
this.instance.addHook('skipLengthCache', (delay) => this.onSkipLengthCache(delay));
this.onSkipLengthCache(500);
}
DataMap.prototype.DESTINATION_RENDERER = 1;
DataMap.prototype.DESTINATION_CLIPBOARD_GENERATOR = 2;
/**
* @param {Object|Array} object
* @returns {Object|Array}
*/
DataMap.prototype.recursiveDuckSchema = function(object) {
return duckSchema(object);
};
/**
* @param {Object} schema
* @param {Number} lastCol
* @param {Number} parent
* @returns {Number}
*/
DataMap.prototype.recursiveDuckColumns = function(schema, lastCol, parent) {
var prop,
i;
if (typeof lastCol === 'undefined') {
lastCol = 0;
parent = '';
}
if (typeof schema === 'object' && !Array.isArray(schema)) {
for (i in schema) {
if (hasOwnProperty(schema, i)) {
if (schema[i] === null) {
prop = parent + i;
this.colToPropCache.push(prop);
this.propToColCache.set(prop, lastCol);
lastCol++;
} else {
lastCol = this.recursiveDuckColumns(schema[i], lastCol, `${i}.`);
}
}
}
}
return lastCol;
};
DataMap.prototype.createMap = function() {
let i;
let schema = this.getSchema();
if (typeof schema === 'undefined') {
throw new Error('trying to create `columns` definition but you didn\'t provide `schema` nor `data`');
}
this.colToPropCache = [];
this.propToColCache = new MultiMap();
let columns = this.instance.getSettings().columns;
if (columns) {
const maxCols = this.instance.getSettings().maxCols;
let columnsLen = Math.min(maxCols, columns.length);
let filteredIndex = 0;
let columnsAsFunc = false;
let schemaLen = deepObjectSize(schema);
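// When `columns` is a function, derive the column count from the schema size
// (or from the source data when the schema is empty).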
if (typeof columns === 'function') {
columnsLen = schemaLen > 0 ? schemaLen : this.instance.countSourceCols();
columnsAsFunc = true;
}
for (i = 0; i < columnsLen; i++) {
let column = columnsAsFunc ? columns(i) : columns[i];
if (isObject(column)) {
if (typeof column.data !== 'undefined') {
let index = columnsAsFunc ? filteredIndex : i;
this.colToPropCache[index] = column.data;
this.propToColCache.set(column.data, index);
}
filteredIndex++;
}
}
} else {
this.recursiveDuckColumns(schema);
}
};
/**
* Returns property name that corresponds with the given column index.
*
* @param {Number} col Visual column index.
 * @returns {String|Number} Column property name, or the physical column index when no property is mapped.
*/
DataMap.prototype.colToProp = function(col) {
col = this.instance.runHooks('modifyCol', col);
if (!isNaN(col) && this.colToPropCache && typeof this.colToPropCache[col] !== 'undefined') {
return this.colToPropCache[col];
}
return col;
};
/**
* @param {Object} prop
* @fires Hooks#modifyCol
* @returns {*}
*/
DataMap.prototype.propToCol = function(prop) {
var col;
if (typeof this.propToColCache.get(prop) === 'undefined') {
col = prop;
} else {
col = this.propToColCache.get(prop);
}
col = this.instance.runHooks('unmodifyCol', col);
return col;
};
/**
* @returns {Object}
*/
DataMap.prototype.getSchema = function() {
var schema = this.instance.getSettings().dataSchema;
if (schema) {
if (typeof schema === 'function') {
return schema();
}
return schema;
}
return this.duckSchema;
};
/**
 * Creates row(s) at the given index, or at the bottom of the data array when no index is provided.
*
* @param {Number} [index] Physical index of the row before which the new row will be inserted.
* @param {Number} [amount] An amount of rows to add.
* @param {String} [source] Source of method call.
* @fires Hooks#afterCreateRow
* @returns {Number} Returns number of created rows.
*/
DataMap.prototype.createRow = function(index, amount, source) {
var row,
colCount = this.instance.countCols(),
numberOfCreatedRows = 0,
currentIndex;
if (!amount) {
amount = 1;
}
if (typeof index !== 'number' || index >= this.instance.countSourceRows()) {
index = this.instance.countSourceRows();
}
this.instance.runHooks('beforeCreateRow', index, amount, source);
currentIndex = index;
var maxRows = this.instance.getSettings().maxRows;
while (numberOfCreatedRows < amount && this.instance.countSourceRows() < maxRows) {
if (this.instance.dataType === 'array') {
if (this.instance.getSettings().dataSchema) {
// Clone template array
row = deepClone(this.getSchema());
} else {
row = [];
/* eslint-disable no-loop-func */
rangeEach(colCount - 1, () => row.push(null));
}
} else if (this.instance.dataType === 'function') {
row = this.instance.getSettings().dataSchema(index);
} else {
row = {};
deepExtend(row, this.getSchema());
}
if (index === this.instance.countSourceRows()) {
this.dataSource.push(row);
} else {
this.spliceData(index, 0, row);
}
numberOfCreatedRows++;
currentIndex++;
}
this.instance.runHooks('afterCreateRow', index, numberOfCreatedRows, source);
this.instance.forceFullRender = true; // used when data was changed
return numberOfCreatedRows;
};
/**
 * Creates column(s) at the given index, or at the right edge of the data array when no index is provided.
*
* @param {Number} [index] Visual index of the column before which the new column will be inserted
* @param {Number} [amount] An amount of columns to add.
* @param {String} [source] Source of method call.
* @fires Hooks#afterCreateCol
* @returns {Number} Returns number of created columns
*/
DataMap.prototype.createCol = function(index, amount, source) {
if (!this.instance.isColumnModificationAllowed()) {
throw new Error('Cannot create new column. When data source in an object, ' +
'you can only have as much columns as defined in first data row, data schema or in the \'columns\' setting.' +
'If you want to be able to add new columns, you have to use array datasource.');
}
var rlen = this.instance.countSourceRows(),
data = this.dataSource,
constructor,
numberOfCreatedCols = 0,
currentIndex;
if (!amount) {
amount = 1;
}
if (typeof index !== 'number' || index >= this.instance.countCols()) {
index = this.instance.countCols();
}
this.instance.runHooks('beforeCreateCol', index, amount, source);
currentIndex = index;
var maxCols = this.instance.getSettings().maxCols;
while (numberOfCreatedCols < amount && this.instance.countCols() < maxCols) {
constructor = columnFactory(this.GridSettings, this.priv.columnsSettingConflicts);
if (typeof index !== 'number' || index >= this.instance.countCols()) {
if (rlen > 0) {
for (var r = 0; r < rlen; r++) {
if (typeof data[r] === 'undefined') {
data[r] = [];
}
data[r].push(null);
}
} else {
data.push([null]);
}
// Add new column constructor
this.priv.columnSettings.push(constructor);
} else {
for (let r = 0; r < rlen; r++) {
data[r].splice(currentIndex, 0, null);
}
// Add new column constructor at given index
this.priv.columnSettings.splice(currentIndex, 0, constructor);
}
numberOfCreatedCols++;
currentIndex++;
}
this.instance.runHooks('afterCreateCol', index, numberOfCreatedCols, source);
this.instance.forceFullRender = true; // used when data was changed
return numberOfCreatedCols;
};
/**
* Removes row from the data array.
*
* @param {Number} [index] Visual index of the row to be removed. If not provided, the last row will be removed
* @param {Number} [amount] Amount of the rows to be removed. If not provided, one row will be removed
* @param {String} [source] Source of method call.
* @fires Hooks#beforeRemoveRow
* @fires Hooks#afterRemoveRow
*/
DataMap.prototype.removeRow = function(index, amount, source) {
if (!amount) {
amount = 1;
}
if (typeof index !== 'number') {
index = -amount;
}
amount = this.instance.runHooks('modifyRemovedAmount', amount, index);
index = (this.instance.countSourceRows() + index) % this.instance.countSourceRows();
let logicRows = this.visualRowsToPhysical(index, amount);
let actionWasNotCancelled = this.instance.runHooks('beforeRemoveRow', index, amount, logicRows, source);
if (actionWasNotCancelled === false) {
return;
}
let data = this.dataSource;
let newData;
newData = this.filterData(index, amount);
if (newData) {
data.length = 0;
Array.prototype.push.apply(data, newData);
}
this.instance.runHooks('afterRemoveRow', index, amount, logicRows, source);
this.instance.forceFullRender = true; // used when data was changed
};
/**
* Removes column from the data array.
*
* @param {Number} [index] Visual index of the column to be removed. If not provided, the last column will be removed
* @param {Number} [amount] Amount of the columns to be removed. If not provided, one column will be removed
* @param {String} [source] Source of method call.
* @fires Hooks#beforeRemoveCol
* @fires Hooks#afterRemoveCol
*/
DataMap.prototype.removeCol = function(index, amount, source) {
if (this.instance.dataType === 'object' || this.instance.getSettings().columns) {
throw new Error('cannot remove column with object data source or columns option specified');
}
if (!amount) {
amount = 1;
}
if (typeof index !== 'number') {
index = -amount;
}
index = (this.instance.countCols() + index) % this.instance.countCols();
let logicColumns = this.visualColumnsToPhysical(index, amount);
let descendingLogicColumns = logicColumns.slice(0).sort((a, b) => b - a);
let actionWasNotCancelled = this.instance.runHooks('beforeRemoveCol', index, amount, logicColumns, source);
if (actionWasNotCancelled === false) {
return;
}
let isTableUniform = true;
let removedColumnsCount = descendingLogicColumns.length;
let data = this.dataSource;
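// The removal is "uniform" when the physical column indexes form one contiguous block,
// which allows a single splice per row instead of one splice per removed column.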
for (let c = 0; c < removedColumnsCount; c++) {
if (isTableUniform && logicColumns[0] !== logicColumns[c] - c) {
isTableUniform = false;
}
}
if (isTableUniform) {
for (let r = 0, rlen = this.instance.countSourceRows(); r < rlen; r++) {
data[r].splice(logicColumns[0], amount);
}
} else {
for (let r = 0, rlen = this.instance.countSourceRows(); r < rlen; r++) {
for (let c = 0; c < removedColumnsCount; c++) {
data[r].splice(descendingLogicColumns[c], 1);
}
}
for (let c = 0; c < removedColumnsCount; c++) {
this.priv.columnSettings.splice(logicColumns[c], 1);
}
}
this.instance.runHooks('afterRemoveCol', index, amount, logicColumns, source);
this.instance.forceFullRender = true; // used when data was changed
};
/**
* Add/Removes data from the column.
*
* @param {Number} col Physical index of column in which do you want to do splice
* @param {Number} index Index at which to start changing the array. If negative, will begin that many elements from the end
* @param {Number} amount An integer indicating the number of old array elements to remove. If amount is 0, no elements are removed
* @returns {Array} Returns removed portion of columns
*/
DataMap.prototype.spliceCol = function(col, index, amount/* , elements... */) {
var elements = arguments.length >= 4 ? [].slice.call(arguments, 3) : [];
var colData = this.instance.getDataAtCol(col);
var removed = colData.slice(index, index + amount);
var after = colData.slice(index + amount);
extendArray(elements, after);
var i = 0;
while (i < amount) {
elements.push(null); // add null in place of removed elements
i++;
}
to2dArray(elements);
this.instance.populateFromArray(index, col, elements, null, null, 'spliceCol');
return removed;
};
/**
* Add/Removes data from the row.
*
* @param {Number} row Physical index of row in which do you want to do splice
* @param {Number} index Index at which to start changing the array. If negative, will begin that many elements from the end.
* @param {Number} amount An integer indicating the number of old array elements to remove. If amount is 0, no elements are removed.
* @returns {Array} Returns removed portion of rows
*/
DataMap.prototype.spliceRow = function(row, index, amount/* , elements... */) {
var elements = arguments.length >= 4 ? [].slice.call(arguments, 3) : [];
var rowData = this.instance.getSourceDataAtRow(row);
var removed = rowData.slice(index, index + amount);
var after = rowData.slice(index + amount);
extendArray(elements, after);
var i = 0;
while (i < amount) {
elements.push(null); // add null in place of removed elements
i++;
}
this.instance.populateFromArray(row, index, [elements], null, null, 'spliceRow');
return removed;
};
/**
* Add/remove row(s) to/from the data source.
*
* @param {Number} index Physical index of the element to remove.
* @param {Number} amount Number of rows to add/remove.
* @param {Object} element Row to add.
*/
DataMap.prototype.spliceData = function(index, amount, element) {
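// Plugins may cancel the operation by returning false from the `beforeDataSplice` hook.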
let continueSplicing = this.instance.runHooks('beforeDataSplice', index, amount, element);
if (continueSplicing !== false) {
this.dataSource.splice(index, amount, element);
}
};
/**
* Filter unwanted data elements from the data source.
*
* @param {Number} index Visual index of the element to remove.
* @param {Number} amount Number of rows to add/remove.
* @returns {Array}
*/
DataMap.prototype.filterData = function(index, amount) {
let physicalRows = this.visualRowsToPhysical(index, amount);
let continueSplicing = this.instance.runHooks('beforeDataFilter', index, amount, physicalRows);
if (continueSplicing !== false) {
let newData = this.dataSource.filter((row, index) => physicalRows.indexOf(index) == -1);
return newData;
}
};
/**
* Returns single value from the data array.
*
* @param {Number} row Visual row index.
* @param {Number} prop
*/
DataMap.prototype.get = function(row, prop) {
row = this.instance.runHooks('modifyRow', row);
let dataRow = this.dataSource[row];
// TODO: To remove, use 'modifyData' hook instead (see below)
let modifiedRowData = this.instance.runHooks('modifyRowData', row);
dataRow = isNaN(modifiedRowData) ? modifiedRowData : dataRow;
//
let value = null;
// try to get value under property `prop` (includes dot)
if (dataRow && dataRow.hasOwnProperty && hasOwnProperty(dataRow, prop)) {
value = dataRow[prop];
} else if (typeof prop === 'string' && prop.indexOf('.') > -1) {
let sliced = prop.split('.');
let out = dataRow;
if (!out) {
return null;
}
for (let i = 0, ilen = sliced.length; i < ilen; i++) {
out = out[sliced[i]];
if (typeof out === 'undefined') {
return null;
}
}
value = out;
} else if (typeof prop === 'function') {
/**
* allows for interacting with complex structures, for example
* d3/jQuery getter/setter properties:
*
* {columns: [{
* data: function(row, value){
* if(arguments.length === 1){
* return row.property();
* }
* row.property(value);
* }
* }]}
*/
value = prop(this.dataSource.slice(row, row + 1)[0]);
}
if (this.instance.hasHook('modifyData')) {
const valueHolder = createObjectPropListener(value);
this.instance.runHooks('modifyData', row, this.propToCol(prop), valueHolder, 'get');
if (valueHolder.isTouched()) {
value = valueHolder.value;
}
}
return value;
};
var copyableLookup = cellMethodLookupFactory('copyable', false);
/**
* Returns single value from the data array (intended for clipboard copy to an external application).
*
* @param {Number} row Physical row index.
 * @param {String|Number|Function} prop Property name, dotted path or getter/setter function.
* @returns {String}
*/
DataMap.prototype.getCopyable = function(row, prop) {
if (copyableLookup.call(this.instance, row, this.propToCol(prop))) {
return this.get(row, prop);
}
return '';
};
/**
* Saves single value to the data array.
*
* @param {Number} row Visual row index.
 * @param {String|Number|Function} prop Property name, dotted path or getter/setter function.
 * @param {String} value Value to save.
* @param {String} [source] Source of hook runner.
*/
DataMap.prototype.set = function(row, prop, value, source) {
row = this.instance.runHooks('modifyRow', row, source || 'datamapGet');
let dataRow = this.dataSource[row];
// TODO: To remove, use 'modifyData' hook instead (see below)
let modifiedRowData = this.instance.runHooks('modifyRowData', row);
dataRow = isNaN(modifiedRowData) ? modifiedRowData : dataRow;
//
if (this.instance.hasHook('modifyData')) {
const valueHolder = createObjectPropListener(value);
this.instance.runHooks('modifyData', row, this.propToCol(prop), valueHolder, 'set');
if (valueHolder.isTouched()) {
value = valueHolder.value;
}
}
// try to set value under property `prop` (includes dot)
if (dataRow && dataRow.hasOwnProperty && hasOwnProperty(dataRow, prop)) {
dataRow[prop] = value;
} else if (typeof prop === 'string' && prop.indexOf('.') > -1) {
let sliced = prop.split('.');
let out = dataRow;
let i = 0;
let ilen;
for (i = 0, ilen = sliced.length - 1; i < ilen; i++) {
if (typeof out[sliced[i]] === 'undefined') {
out[sliced[i]] = {};
}
out = out[sliced[i]];
}
out[sliced[i]] = value;
} else if (typeof prop === 'function') {
/* see the `function` handler in `get` */
prop(this.dataSource.slice(row, row + 1)[0], value);
} else {
dataRow[prop] = value;
}
};
/**
 * This ridiculous piece of code maps row IDs that are present in the table data to those displayed to the user.
 * The trick is, the physical row id (stored in settings.data) is not necessarily the same
 * as the visual (displayed) row id (e.g. when sorting is applied).
*
* @param {Number} index Visual row index.
* @param {Number} amount
* @fires Hooks#modifyRow
* @returns {Number}
*/
DataMap.prototype.visualRowsToPhysical = function(index, amount) {
var totalRows = this.instance.countSourceRows();
var physicRow = (totalRows + index) % totalRows;
var logicRows = [];
var rowsToRemove = amount;
var row;
while (physicRow < totalRows && rowsToRemove) {
row = this.instance.runHooks('modifyRow', physicRow);
logicRows.push(row);
rowsToRemove--;
physicRow++;
}
return logicRows;
};
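// Editor's illustrative note, not part of the original Handsontable source: because the
// `modifyRow` hook can remap indexes (e.g. when sorting is applied, as the comment above
// notes), a call such as `visualRowsToPhysical(0, 2)` may return `[3, 1]`, i.e. the first
// two rows the user sees are stored as physical rows 3 and 1 in settings.data.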
/**
 * Translates visual column indexes into physical ones (e.g. when columns have been moved).
 *
 * @param {Number} index Visual column index.
 * @param {Number} amount
 * @returns {Array}
*/
DataMap.prototype.visualColumnsToPhysical = function(index, amount) {
let totalCols = this.instance.countCols();
let physicalCol = (totalCols + index) % totalCols;
let visualCols = [];
let colsToRemove = amount;
while (physicalCol < totalCols && colsToRemove) {
let col = this.instance.runHooks('modifyCol', physicalCol);
visualCols.push(col);
colsToRemove--;
physicalCol++;
}
return visualCols;
};
/**
* Clears the data array.
*/
DataMap.prototype.clear = function() {
for (var r = 0; r < this.instance.countSourceRows(); r++) {
for (var c = 0; c < this.instance.countCols(); c++) {
this.set(r, this.colToProp(c), '');
}
}
};
/**
* Clear cached data length.
*/
DataMap.prototype.clearLengthCache = function() {
this.cachedLength = null;
};
/**
* Get data length.
*
* @returns {Number}
*/
DataMap.prototype.getLength = function() {
let maxRows,
maxRowsFromSettings = this.instance.getSettings().maxRows;
if (maxRowsFromSettings < 0 || maxRowsFromSettings === 0) {
maxRows = 0;
} else {
maxRows = maxRowsFromSettings || Infinity;
}
let length = this.instance.countSourceRows();
if (this.instance.hasHook('modifyRow')) {
let reValidate = this.skipCache;
this.interval.start();
if (length !== this.latestSourceRowsCount) {
reValidate = true;
}
this.latestSourceRowsCount = length;
if (this.cachedLength === null || reValidate) {
rangeEach(length - 1, (row) => {
row = this.instance.runHooks('modifyRow', row);
if (row === null) {
--length;
}
});
this.cachedLength = length;
} else {
length = this.cachedLength;
}
} else {
this.interval.stop();
}
return Math.min(length, maxRows);
};
/**
* Returns the data array.
*
* @returns {Array}
*/
DataMap.prototype.getAll = function() {
const start = {
row: 0,
col: 0,
};
let end = {
row: Math.max(this.instance.countSourceRows() - 1, 0),
col: Math.max(this.instance.countCols() - 1, 0),
};
if (start.row - end.row === 0 && !this.instance.countSourceRows()) {
return [];
}
return this.getRange(start, end, DataMap.prototype.DESTINATION_RENDERER);
};
/**
* Returns data range as array.
*
* @param {Object} [start] Start selection position. Visual indexes.
* @param {Object} [end] End selection position. Visual indexes.
* @param {Number} destination Destination of datamap.get
* @returns {Array}
*/
DataMap.prototype.getRange = function(start, end, destination) {
var r,
rlen,
c,
clen,
output = [],
row;
const maxRows = this.instance.getSettings().maxRows;
const maxCols = this.instance.getSettings().maxCols;
if (maxRows === 0 || maxCols === 0) {
return [];
}
var getFn = destination === this.DESTINATION_CLIPBOARD_GENERATOR ? this.getCopyable : this.get;
rlen = Math.min(Math.max(maxRows - 1, 0), Math.max(start.row, end.row));
clen = Math.min(Math.max(maxCols - 1, 0), Math.max(start.col, end.col));
for (r = Math.min(start.row, end.row); r <= rlen; r++) {
row = [];
let physicalRow = this.instance.runHooks('modifyRow', r);
for (c = Math.min(start.col, end.col); c <= clen; c++) {
if (physicalRow === null) {
break;
}
row.push(getFn.call(this, r, this.colToProp(c)));
}
if (physicalRow !== null) {
output.push(row);
}
}
return output;
};
/**
* Return data as text (tab separated columns).
*
* @param {Object} [start] Start selection position. Visual indexes.
* @param {Object} [end] End selection position. Visual indexes.
* @returns {String}
*/
DataMap.prototype.getText = function(start, end) {
return SheetClip.stringify(this.getRange(start, end, this.DESTINATION_RENDERER));
};
/**
* Return data as copyable text (tab separated columns intended for clipboard copy to an external application).
*
* @param {Object} [start] Start selection position. Visual indexes.
* @param {Object} [end] End selection position. Visual indexes.
* @returns {String}
*/
DataMap.prototype.getCopyableText = function(start, end) {
return SheetClip.stringify(this.getRange(start, end, this.DESTINATION_CLIPBOARD_GENERATOR));
};
/**
* `skipLengthCache` callback.
* @private
* @param {Number} delay Time of the delay in milliseconds.
*/
DataMap.prototype.onSkipLengthCache = function(delay) {
this.skipCache = true;
setTimeout(() => {
this.skipCache = false;
}, delay);
};
/**
* Destroy instance.
*/
DataMap.prototype.destroy = function() {
this.interval.stop();
this.interval = null;
this.instance = null;
this.priv = null;
this.GridSettings = null;
this.dataSource = null;
this.cachedLength = null;
this.duckSchema = null;
};
export default DataMap;
 | 1 | 14,515 | Most of these variables can be moved after the `return` or to the body of the `if` statement. | handsontable-handsontable | js
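A minimal sketch of the refactor the reviewer is hinting at for `DataMap.prototype.getRange` above: declare each variable only after the early `return []` guard, and scope the per-row variables inside the loop. This is an illustration of the suggestion only, not the change that was actually committed to Handsontable.

DataMap.prototype.getRange = function(start, end, destination) {
  const maxRows = this.instance.getSettings().maxRows;
  const maxCols = this.instance.getSettings().maxCols;

  // Bail out before declaring anything the early return does not need.
  if (maxRows === 0 || maxCols === 0) {
    return [];
  }
  const getFn = destination === this.DESTINATION_CLIPBOARD_GENERATOR ? this.getCopyable : this.get;
  const rlen = Math.min(Math.max(maxRows - 1, 0), Math.max(start.row, end.row));
  const clen = Math.min(Math.max(maxCols - 1, 0), Math.max(start.col, end.col));
  const output = [];

  for (let r = Math.min(start.row, end.row); r <= rlen; r++) {
    // Row-scoped variables now live in the loop body instead of the function header.
    const row = [];
    const physicalRow = this.instance.runHooks('modifyRow', r);

    for (let c = Math.min(start.col, end.col); c <= clen; c++) {
      if (physicalRow === null) {
        break;
      }
      row.push(getFn.call(this, r, this.colToProp(c)));
    }
    if (physicalRow !== null) {
      output.push(row);
    }
  }

  return output;
};

Either version behaves identically; the point of the suggestion is only to keep declarations close to where they are used.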
@@ -115,6 +115,10 @@ public enum JsonRpcError {
DECODE_ERROR(-50100, "Unable to decode the private signed raw transaction"),
GET_PRIVATE_TRANSACTION_NONCE_ERROR(-50100, "Unable to determine nonce for account in group."),
+ // Privacy multi-tenancy errors
+ PRIVACY_MULTI_TENANCY_NO_TOKEN(-50100, "No token provided"),
+ PRIVACY_MULTI_TENANCY_NO_ENCLAVE_PUBLIC_KEY(-50100, "No privacy enclave public key"),
+
CANT_CONNECT_TO_LOCAL_PEER(-32100, "Cannot add local node as peer."),
// Invalid input errors | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.response;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonGetter;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonFormat(shape = JsonFormat.Shape.OBJECT)
public enum JsonRpcError {
// Standard errors
PARSE_ERROR(-32700, "Parse error"),
INVALID_REQUEST(-32600, "Invalid Request"),
METHOD_NOT_FOUND(-32601, "Method not found"),
INVALID_PARAMS(-32602, "Invalid params"),
INTERNAL_ERROR(-32603, "Internal error"),
METHOD_NOT_ENABLED(-32604, "Method not enabled"),
// eth_sendTransaction specific error message
ETH_SEND_TX_NOT_AVAILABLE(
-32604,
"The method eth_sendTransaction is not supported. Use eth_sendRawTransaction to send a signed transaction to Besu."),
// P2P related errors
P2P_DISABLED(-32000, "P2P has been disabled. This functionality is not available"),
P2P_NETWORK_NOT_RUNNING(-32000, "P2P network is not running"),
// Filter & Subscription Errors
FILTER_NOT_FOUND(-32000, "Filter not found"),
LOGS_FILTER_NOT_FOUND(-32000, "Logs filter not found"),
SUBSCRIPTION_NOT_FOUND(-32000, "Subscription not found"),
NO_MINING_WORK_FOUND(-32000, "No mining work available yet"),
// Transaction validation failures
NONCE_TOO_LOW(-32001, "Nonce too low"),
INVALID_TRANSACTION_SIGNATURE(-32002, "Invalid signature"),
INTRINSIC_GAS_EXCEEDS_LIMIT(-32003, "Intrinsic gas exceeds gas limit"),
TRANSACTION_UPFRONT_COST_EXCEEDS_BALANCE(-32004, "Upfront cost exceeds account balance"),
EXCEEDS_BLOCK_GAS_LIMIT(-32005, "Transaction gas limit exceeds block gas limit"),
INCORRECT_NONCE(-32006, "Incorrect nonce"),
TX_SENDER_NOT_AUTHORIZED(-32007, "Sender account not authorized to send transactions"),
CHAIN_HEAD_WORLD_STATE_NOT_AVAILABLE(-32008, "Initial sync is still in progress"),
GAS_PRICE_TOO_LOW(-32009, "Gas price below configured minimum gas price"),
// Miner failures
COINBASE_NOT_SET(-32010, "Coinbase not set. Unable to start mining without a coinbase"),
NO_HASHES_PER_SECOND(-32011, "No hashes being generated by the current node"),
// Wallet errors
COINBASE_NOT_SPECIFIED(-32000, "Coinbase must be explicitly specified"),
// Account errors
NO_ACCOUNT_FOUND(-32000, "Account not found"),
  // Worldstate errors
WORLD_STATE_UNAVAILABLE(-32000, "World state unavailable"),
// Debug failures
PARENT_BLOCK_NOT_FOUND(-32000, "Parent block not found"),
// Permissioning/Account whitelist errors
ACCOUNT_WHITELIST_NOT_ENABLED(-32000, "Account whitelisting has not been enabled"),
ACCOUNT_WHITELIST_EMPTY_ENTRY(-32000, "Request contains an empty list of accounts"),
ACCOUNT_WHITELIST_INVALID_ENTRY(-32000, "Request contains an invalid account"),
ACCOUNT_WHITELIST_DUPLICATED_ENTRY(-32000, "Request contains duplicate accounts"),
ACCOUNT_WHITELIST_EXISTING_ENTRY(-32000, "Cannot add an existing account to whitelist"),
ACCOUNT_WHITELIST_ABSENT_ENTRY(-32000, "Cannot remove an absent account from whitelist"),
// Permissioning/Node whitelist errors
NODE_WHITELIST_NOT_ENABLED(-32000, "Node whitelisting has not been enabled"),
NODE_WHITELIST_EMPTY_ENTRY(-32000, "Request contains an empty list of nodes"),
NODE_WHITELIST_INVALID_ENTRY(-32000, "Request contains an invalid node"),
NODE_WHITELIST_DUPLICATED_ENTRY(-32000, "Request contains duplicate nodes"),
NODE_WHITELIST_EXISTING_ENTRY(-32000, "Cannot add an existing node to whitelist"),
NODE_WHITELIST_MISSING_ENTRY(-32000, "Cannot remove an absent node from whitelist"),
NODE_WHITELIST_FIXED_NODE_CANNOT_BE_REMOVED(
-32000, "Cannot remove a fixed node (bootnode or static node) from whitelist"),
// Permissioning/persistence errors
WHITELIST_PERSIST_FAILURE(
-32000, "Unable to persist changes to whitelist configuration file. Changes reverted"),
WHITELIST_FILE_SYNC(
-32000,
"The permissioning whitelist configuration file is out of sync. The changes have been applied, but not persisted to disk"),
WHITELIST_RELOAD_ERROR(
-32000,
"Error reloading permissions file. Please use perm_getAccountsWhitelist and perm_getNodesWhitelist to review the current state of the whitelists"),
PERMISSIONING_NOT_ENABLED(-32000, "Node/Account whitelisting has not been enabled"),
NON_PERMITTED_NODE_CANNOT_BE_ADDED_AS_A_PEER(-32000, "Cannot add a non-permitted node as a peer"),
// Permissioning/Authorization errors
UNAUTHORIZED(-40100, "Unauthorized"),
// Private transaction errors
ENCLAVE_ERROR(-50100, "Error communicating with enclave"),
UNIMPLEMENTED_PRIVATE_TRANSACTION_TYPE(-50100, "Unimplemented private transaction type"),
PRIVACY_NOT_ENABLED(-50100, "Privacy is not enabled"),
CREATE_PRIVACY_GROUP_ERROR(-50100, "Error creating privacy group"),
DELETE_PRIVACY_GROUP_ERROR(-50100, "Error deleting privacy group"),
FIND_PRIVACY_GROUP_ERROR(-50100, "Error finding privacy group"),
VALUE_NOT_ZERO(-50100, "We cannot transfer ether in private transaction yet."),
DECODE_ERROR(-50100, "Unable to decode the private signed raw transaction"),
GET_PRIVATE_TRANSACTION_NONCE_ERROR(-50100, "Unable to determine nonce for account in group."),
CANT_CONNECT_TO_LOCAL_PEER(-32100, "Cannot add local node as peer."),
// Invalid input errors
ENODE_ID_INVALID(
-32000,
"Invalid node ID: node ID must have exactly 128 hexadecimal characters and should not include any '0x' hex prefix."),
// Enclave errors
NODE_MISSING_PEER_URL(-50200, "NodeMissingPeerUrl"),
NODE_PUSHING_TO_PEER(-50200, "NodePushingToPeer"),
NODE_PROPAGATING_TO_ALL_PEERS(-50200, "NodePropagatingToAllPeers"),
NO_SENDER_KEY(-50200, "NoSenderKey"),
INVALID_PAYLOAD(-50200, "InvalidPayload"),
ENCLAVE_CREATE_KEY_PAIR(-50200, "EnclaveCreateKeyPair"),
ENCLAVE_DECODE_PUBLIC_KEY(-50200, "EnclaveDecodePublicKey"),
ENCLAVE_DECRYPT_WRONG_PRIVATE_KEY(-50200, "EnclaveDecryptWrongPrivateKey"),
ENCLAVE_ENCRYPT_COMBINE_KEYS(-50200, "EnclaveEncryptCombineKeys"),
ENCLAVE_MISSING_PRIVATE_KEY_PASSWORD(-50200, "EnclaveMissingPrivateKeyPasswords"),
ENCLAVE_NO_MATCHING_PRIVATE_KEY(-50200, "EnclaveNoMatchingPrivateKey"),
ENCLAVE_NOT_PAYLOAD_OWNER(-50200, "EnclaveNotPayloadOwner"),
ENCLAVE_UNSUPPORTED_PRIVATE_KEY_TYPE(-50200, "EnclaveUnsupportedPrivateKeyType"),
ENCLAVE_STORAGE_DECRYPT(-50200, "EnclaveStorageDecrypt"),
ENCLAVE_PRIVACY_GROUP_CREATION(-50200, "EnclavePrivacyGroupIdCreation"),
ENCLAVE_PAYLOAD_NOT_FOUND(-50200, "EnclavePayloadNotFound"),
CREATE_GROUP_INCLUDE_SELF(-50200, "CreatePrivacyGroupShouldIncludeSelf"),
/** Storing privacy group issue */
ENCLAVE_UNABLE_STORE_PRIVACY_GROUP(-50200, "PrivacyGroupNotStored"),
ENCLAVE_UNABLE_DELETE_PRIVACY_GROUP(-50200, "PrivacyGroupNotDeleted"),
ENCLAVE_UNABLE_PUSH_DELETE_PRIVACY_GROUP(-50200, "PrivacyGroupNotPushed"),
ENCLAVE_PRIVACY_GROUP_MISSING(-50200, "PrivacyGroupNotFound"),
ENCLAVE_PRIVACY_QUERY_ERROR(-50200, "PrivacyGroupQueryError"),
ENCLAVE_KEYS_CANNOT_DECRYPT_PAYLOAD(-50200, "EnclaveKeysCannotDecryptPayload"),
METHOD_UNIMPLEMENTED(-50200, "MethodUnimplemented");
private final int code;
private final String message;
JsonRpcError(final int code, final String message) {
this.code = code;
this.message = message;
}
@JsonGetter("code")
public int getCode() {
return code;
}
@JsonGetter("message")
public String getMessage() {
return message;
}
@JsonCreator
public static JsonRpcError fromJson(
@JsonProperty("code") final int code, @JsonProperty("message") final String message) {
for (final JsonRpcError error : JsonRpcError.values()) {
if (error.code == code && error.message.equals(message)) {
return error;
}
}
return null;
}
}
 | 1 | 21,002 | nit: Is this actually just a http "unauthorised" response? Or is that too vague? If this is the right way to go ... is "token" the right word? Or do we want to say something else? | hyperledger-besu | java
@@ -247,7 +247,7 @@ module Bolt
def puppetdb_client
return @puppetdb_client if @puppetdb_client
- puppetdb_config = Bolt::PuppetDB::Config.load_config(nil, config.puppetdb)
+ puppetdb_config = Bolt::PuppetDB::Config.load_config(nil, config.puppetdb, config.boltdir.path)
@puppetdb_client = Bolt::PuppetDB::Client.new(puppetdb_config)
end
| 1 | # frozen_string_literal: true
# Avoid requiring the CLI from other files. It has side-effects - such as loading r10k -
# that are undesirable when using Bolt as a library.
require 'uri'
require 'benchmark'
require 'json'
require 'io/console'
require 'logging'
require 'optparse'
require 'bolt/analytics'
require 'bolt/bolt_option_parser'
require 'bolt/config'
require 'bolt/error'
require 'bolt/executor'
require 'bolt/inventory'
require 'bolt/rerun'
require 'bolt/logger'
require 'bolt/outputter'
require 'bolt/puppetdb'
require 'bolt/plugin'
require 'bolt/pal'
require 'bolt/target'
require 'bolt/version'
require 'bolt/secret'
module Bolt
class CLIExit < StandardError; end
class CLI
COMMANDS = { 'command' => %w[run],
'script' => %w[run],
'task' => %w[show run],
'plan' => %w[show run convert],
'file' => %w[upload],
'puppetfile' => %w[install show-modules generate-types],
'secret' => %w[encrypt decrypt createkeys],
'inventory' => %w[show],
'group' => %w[show],
'project' => %w[init],
'apply' => %w[] }.freeze
attr_reader :config, :options
def initialize(argv)
Bolt::Logger.initialize_logging
@logger = Logging.logger[self]
@argv = argv
@config = Bolt::Config.default
@options = {}
end
# Only call after @config has been initialized.
def inventory
@inventory ||= Bolt::Inventory.from_config(config, plugins)
end
private :inventory
def help?(remaining)
# Set the subcommand
options[:subcommand] = remaining.shift
if options[:subcommand] == 'help'
options[:help] = true
options[:subcommand] = remaining.shift
end
# This section handles parsing non-flag options which are
      # subcommand specific rather than part of the config
actions = COMMANDS[options[:subcommand]]
if actions && !actions.empty?
options[:action] = remaining.shift
end
options[:help]
end
private :help?
def parse
parser = BoltOptionParser.new(options)
# This part aims to handle both `bolt <mode> --help` and `bolt help <mode>`.
remaining = handle_parser_errors { parser.permute(@argv) } unless @argv.empty?
if @argv.empty? || help?(remaining)
# Update the parser for the subcommand (or lack thereof)
parser.update
puts parser.help
raise Bolt::CLIExit
end
options[:object] = remaining.shift
# Only parse task_options for task or plan
if %w[task plan].include?(options[:subcommand])
task_options, remaining = remaining.partition { |s| s =~ /.+=/ }
if options[:task_options]
unless task_options.empty?
raise Bolt::CLIError,
"Parameters must be specified through either the --params " \
"option or param=value pairs, not both"
end
options[:params_parsed] = true
else
options[:params_parsed] = false
options[:task_options] = Hash[task_options.map { |a| a.split('=', 2) }]
end
end
options[:leftovers] = remaining
validate(options)
@config = if options[:configfile]
Bolt::Config.from_file(options[:configfile], options)
else
boltdir = if options[:boltdir]
Bolt::Boltdir.new(options[:boltdir])
else
Bolt::Boltdir.find_boltdir(Dir.pwd)
end
Bolt::Config.from_boltdir(boltdir, options)
end
# Set $future global if configured
# rubocop:disable Style/GlobalVars
$future = @config.future
# rubocop:enable Style/GlobalVars
Bolt::Logger.configure(config.log, config.color)
# Logger must be configured before checking path case, otherwise warnings will not display
@config.check_path_case('modulepath', @config.modulepath)
# After validation, initialize inventory and targets. Errors here are better to catch early.
# After this step
      # options[:target_args] will contain a string/array version of the targeting options; this is passed to plans
# options[:targets] will contain a resolved set of Target objects
unless options[:subcommand] == 'puppetfile' ||
options[:subcommand] == 'secret' ||
options[:subcommand] == 'project' ||
options[:action] == 'show' ||
options[:action] == 'convert'
update_targets(options)
end
unless options.key?(:verbose)
# Default to verbose for everything except plans
options[:verbose] = options[:subcommand] != 'plan'
end
warn_inventory_overrides_cli(options)
options
rescue Bolt::Error => e
outputter.fatal_error(e)
raise e
end
def update_targets(options)
target_opts = options.keys.select { |opt| %i[query rerun nodes targets].include?(opt) }
target_string = "'--nodes', '--targets', '--rerun', or '--query'"
if target_opts.length > 1
raise Bolt::CLIError, "Only one targeting option #{target_string} may be specified"
elsif target_opts.empty? && options[:subcommand] != 'plan'
raise Bolt::CLIError, "Command requires a targeting option: #{target_string}"
end
nodes = if options[:query]
query_puppetdb_nodes(options[:query])
elsif options[:rerun]
rerun.get_targets(options[:rerun])
else
options[:targets] || options[:nodes] || []
end
options[:target_args] = nodes
options[:targets] = inventory.get_targets(nodes)
end
def validate(options)
unless COMMANDS.include?(options[:subcommand])
raise Bolt::CLIError,
"Expected subcommand '#{options[:subcommand]}' to be one of " \
"#{COMMANDS.keys.join(', ')}"
end
actions = COMMANDS[options[:subcommand]]
if actions.any?
if options[:action].nil?
raise Bolt::CLIError,
"Expected an action of the form 'bolt #{options[:subcommand]} <action>'"
end
unless actions.include?(options[:action])
raise Bolt::CLIError,
"Expected action '#{options[:action]}' to be one of " \
"#{actions.join(', ')}"
end
end
if options[:subcommand] != 'file' && options[:subcommand] != 'script' &&
!options[:leftovers].empty?
raise Bolt::CLIError,
"Unknown argument(s) #{options[:leftovers].join(', ')}"
end
if %w[task plan].include?(options[:subcommand]) && options[:action] == 'run'
if options[:object].nil?
raise Bolt::CLIError, "Must specify a #{options[:subcommand]} to run"
end
# This may mean that we parsed a parameter as the object
unless options[:object] =~ /\A([a-z][a-z0-9_]*)?(::[a-z][a-z0-9_]*)*\Z/
raise Bolt::CLIError,
"Invalid #{options[:subcommand]} '#{options[:object]}'"
end
end
if options[:boltdir] && options[:configfile]
raise Bolt::CLIError, "Only one of '--boltdir' or '--configfile' may be specified"
end
if options[:noop] &&
!(options[:subcommand] == 'task' && options[:action] == 'run') && options[:subcommand] != 'apply'
raise Bolt::CLIError,
"Option '--noop' may only be specified when running a task or applying manifest code"
end
if options[:subcommand] == 'apply' && (options[:object] && options[:code])
raise Bolt::CLIError, "--execute is unsupported when specifying a manifest file"
end
if options[:subcommand] == 'apply' && (!options[:object] && !options[:code])
raise Bolt::CLIError, "a manifest file or --execute is required"
end
if options[:subcommand] == 'command' && (!options[:object] || options[:object].empty?)
raise Bolt::CLIError, "Must specify a command to run"
end
end
def handle_parser_errors
yield
rescue OptionParser::MissingArgument => e
raise Bolt::CLIError, "Option '#{e.args.first}' needs a parameter"
rescue OptionParser::InvalidArgument => e
raise Bolt::CLIError, "Invalid parameter specified for option '#{e.args.first}': #{e.args[1]}"
rescue OptionParser::InvalidOption, OptionParser::AmbiguousOption => e
raise Bolt::CLIError, "Unknown argument '#{e.args.first}'"
end
def puppetdb_client
return @puppetdb_client if @puppetdb_client
puppetdb_config = Bolt::PuppetDB::Config.load_config(nil, config.puppetdb)
@puppetdb_client = Bolt::PuppetDB::Client.new(puppetdb_config)
end
def plugins
@plugins ||= Bolt::Plugin.setup(config, pal, puppetdb_client, analytics)
end
def query_puppetdb_nodes(query)
puppetdb_client.query_certnames(query)
end
def warn_inventory_overrides_cli(opts)
inventory_source = if ENV[Bolt::Inventory::ENVIRONMENT_VAR]
Bolt::Inventory::ENVIRONMENT_VAR
elsif @config.inventoryfile && Bolt::Util.file_stat(@config.inventoryfile)
@config.inventoryfile
elsif (inventory_file = @config.default_inventoryfile.find do |file|
begin
Bolt::Util.file_stat(file)
rescue Errno::ENOENT
false
end
end
)
inventory_file
end
inventory_cli_opts = %i[authentication escalation transports].each_with_object([]) do |key, acc|
acc.concat(Bolt::BoltOptionParser::OPTIONS[key])
end
inventory_cli_opts.concat(%w[no-host-key-check no-ssl no-ssl-verify no-tty])
conflicting_options = Set.new(opts.keys.map(&:to_s)).intersection(inventory_cli_opts)
if inventory_source && conflicting_options.any?
@logger.warn("CLI arguments #{conflicting_options.to_a} may be overridden by Inventory: #{inventory_source}")
end
end
def execute(options)
message = nil
handler = Signal.trap :INT do |signo|
@logger.info(
"Exiting after receiving SIG#{Signal.signame(signo)} signal.#{message ? ' ' + message : ''}"
)
exit!
end
if options[:action] == 'convert'
convert_plan(options[:object])
return 0
end
screen = "#{options[:subcommand]}_#{options[:action]}"
# submit a different screen for `bolt task show` and `bolt task show foo`
if options[:action] == 'show' && options[:object]
screen += '_object'
end
screen_view_fields = {
output_format: config.format,
boltdir_type: config.boltdir.type
}
# Only include target and inventory info for commands that take a targets
# list. This avoids loading inventory for commands that don't need it.
if options.key?(:targets)
screen_view_fields.merge!(target_nodes: options[:targets].count,
inventory_nodes: inventory.node_names.count,
inventory_groups: inventory.group_names.count,
inventory_version: inventory.version)
end
analytics.screen_view(screen, screen_view_fields)
if options[:action] == 'show'
if options[:subcommand] == 'task'
if options[:object]
show_task(options[:object])
else
list_tasks
end
elsif options[:subcommand] == 'plan'
if options[:object]
show_plan(options[:object])
else
list_plans
end
elsif options[:subcommand] == 'inventory'
if options[:detail]
show_targets
else
list_targets
end
elsif options[:subcommand] == 'group'
list_groups
end
return 0
elsif options[:action] == 'show-modules'
list_modules
return 0
end
message = 'There may be processes left executing on some nodes.'
if %w[task plan].include?(options[:subcommand]) && options[:task_options] && !options[:params_parsed] && pal
options[:task_options] = pal.parse_params(options[:subcommand], options[:object], options[:task_options])
end
case options[:subcommand]
when 'project'
code = initialize_project
when 'plan'
code = run_plan(options[:object], options[:task_options], options[:target_args], options)
when 'puppetfile'
if options[:action] == 'generate-types'
code = generate_types
elsif options[:action] == 'install'
code = install_puppetfile(@config.puppetfile_config, @config.puppetfile, @config.modulepath)
end
when 'secret'
code = Bolt::Secret.execute(plugins, outputter, options)
when 'apply'
if options[:object]
validate_file('manifest', options[:object])
options[:code] = File.read(File.expand_path(options[:object]))
end
code = apply_manifest(options[:code], options[:targets], options[:object], options[:noop])
else
executor = Bolt::Executor.new(config.concurrency, analytics, options[:noop])
targets = options[:targets]
results = nil
outputter.print_head
elapsed_time = Benchmark.realtime do
executor_opts = {}
executor_opts[:description] = options[:description] if options.key?(:description)
executor.subscribe(outputter)
executor.subscribe(log_outputter)
results =
case options[:subcommand]
when 'command'
executor.run_command(targets, options[:object], executor_opts)
when 'script'
script = options[:object]
validate_file('script', script)
executor.run_script(targets, script, options[:leftovers], executor_opts)
when 'task'
pal.run_task(options[:object],
targets,
options[:task_options],
executor,
inventory,
options[:description])
when 'file'
src = options[:object]
dest = options[:leftovers].first
if dest.nil?
raise Bolt::CLIError, "A destination path must be specified"
end
validate_file('source file', src, true)
executor.upload_file(targets, src, dest, executor_opts)
end
end
executor.shutdown
rerun.update(results)
outputter.print_summary(results, elapsed_time)
code = results.ok ? 0 : 2
end
code
rescue Bolt::Error => e
outputter.fatal_error(e)
raise e
ensure
# restore original signal handler
Signal.trap :INT, handler if handler
analytics&.finish
end
def show_task(task_name)
outputter.print_task_info(pal.get_task_info(task_name))
end
def list_tasks
outputter.print_tasks(pal.list_tasks, pal.list_modulepath)
end
def show_plan(plan_name)
outputter.print_plan_info(pal.get_plan_info(plan_name))
end
def list_plans
outputter.print_plans(pal.list_plans, pal.list_modulepath)
end
def list_targets
update_targets(options)
outputter.print_targets(options[:targets])
end
def show_targets
update_targets(options)
outputter.print_target_info(options[:targets])
end
def list_groups
groups = inventory.group_names
outputter.print_groups(groups)
end
def run_plan(plan_name, plan_arguments, nodes, options)
unless nodes.empty?
if plan_arguments['nodes']
raise Bolt::CLIError,
"A plan's 'nodes' parameter may be specified using the --nodes option, but in that " \
"case it must not be specified as a separate nodes=<value> parameter nor included " \
"in the JSON data passed in the --params option"
end
plan_arguments['nodes'] = nodes.join(',')
end
plan_context = { plan_name: plan_name,
params: plan_arguments }
plan_context[:description] = options[:description] if options[:description]
executor = Bolt::Executor.new(config.concurrency, analytics, options[:noop])
if options.fetch(:format, 'human') == 'human'
executor.subscribe(outputter)
else
# Only subscribe to out::message events for JSON outputter
executor.subscribe(outputter, [:message])
end
executor.subscribe(log_outputter)
executor.start_plan(plan_context)
result = pal.run_plan(plan_name, plan_arguments, executor, inventory, puppetdb_client)
# If a non-bolt exception bubbles up the plan won't get finished
executor.finish_plan(result)
executor.shutdown
rerun.update(result)
outputter.print_plan_result(result)
result.ok? ? 0 : 1
end
def apply_manifest(code, targets, filename = nil, noop = false)
ast = pal.parse_manifest(code, filename)
executor = Bolt::Executor.new(config.concurrency, analytics, noop)
executor.subscribe(outputter) if options.fetch(:format, 'human') == 'human'
executor.subscribe(log_outputter)
# apply logging looks like plan logging, so tell the outputter we're in a
# plan even though we're not
executor.publish_event(type: :plan_start, plan: nil)
results = nil
elapsed_time = Benchmark.realtime do
pal.in_plan_compiler(executor, inventory, puppetdb_client) do |compiler|
compiler.call_function('apply_prep', targets)
end
results = pal.with_bolt_executor(executor, inventory, puppetdb_client) do
Puppet.lookup(:apply_executor).apply_ast(ast, targets, catch_errors: true, noop: noop)
end
end
executor.shutdown
outputter.print_apply_result(results, elapsed_time)
rerun.update(results)
results.ok ? 0 : 1
end
def list_modules
outputter.print_module_list(pal.list_modules)
end
def generate_types
# generate_types will surface a nice error with helpful message if it fails
pal.generate_types
0
end
def initialize_project
path = File.expand_path(options[:object] || Dir.pwd)
FileUtils.mkdir_p(path)
ok = FileUtils.touch(File.join(path, 'bolt.yaml'))
result = if ok
"Successfully created Bolt project directory at #{path}"
else
"Could not create Bolt project directory at #{path}"
end
outputter.print_message result
ok ? 0 : 1
end
def install_puppetfile(config, puppetfile, modulepath)
require 'r10k/cli'
require 'bolt/r10k_log_proxy'
if puppetfile.exist?
moduledir = modulepath.first.to_s
r10k_opts = {
root: puppetfile.dirname.to_s,
puppetfile: puppetfile.to_s,
moduledir: moduledir
}
settings = R10K::Settings.global_settings.evaluate(config)
R10K::Initializers::GlobalInitializer.new(settings).call
install_action = R10K::Action::Puppetfile::Install.new(r10k_opts, nil)
# Override the r10k logger with a proxy to our own logger
R10K::Logging.instance_variable_set(:@outputter, Bolt::R10KLogProxy.new)
ok = install_action.call
outputter.print_puppetfile_result(ok, puppetfile, moduledir)
# Automatically generate types after installing modules
pal.generate_types
ok ? 0 : 1
else
raise Bolt::FileError.new("Could not find a Puppetfile at #{puppetfile}", puppetfile)
end
rescue R10K::Error => e
raise PuppetfileError, e
end
def pal
@pal ||= Bolt::PAL.new(config.modulepath,
config.hiera_config,
config.boltdir.resource_types,
config.compile_concurrency)
end
def convert_plan(plan)
pal.convert_plan(plan)
end
def validate_file(type, path, allow_dir = false)
if path.nil?
raise Bolt::CLIError, "A #{type} must be specified"
end
Bolt::Util.validate_file(type, path, allow_dir)
end
def rerun
@rerun ||= Bolt::Rerun.new(@config.rerunfile, @config.save_rerun)
end
def outputter
@outputter ||= Bolt::Outputter.for_format(config.format, config.color, options[:verbose], config.trace)
end
def log_outputter
@log_outputter ||= Bolt::Outputter::Logger.new(options[:verbose], config.trace)
end
def analytics
@analytics ||= begin
client = Bolt::Analytics.build_client
client.bundled_content = bundled_content
client
end
end
def bundled_content
# We only need to enumerate bundled content when running a task or plan
content = { 'Plan' => [],
'Task' => [],
'Plugin' => Bolt::Plugin::BUILTIN_PLUGINS }
if %w[plan task].include?(options[:subcommand]) && options[:action] == 'run'
default_content = Bolt::PAL.new([], nil, nil)
content['Plan'] = default_content.list_plans.each_with_object([]) do |iter, col|
col << iter&.first
end
content['Task'] = default_content.list_tasks.each_with_object([]) do |iter, col|
col << iter&.first
end
end
content
end
end
end
| 1 | 13,074 | We check for `$future` in PuppetDB::Config, so this won't change behavior if you don't have `future` set. | puppetlabs-bolt | rb |
@@ -1279,6 +1279,8 @@ class _Frame(object):
col_by = [_resolve_col(df, col_or_s) for col_or_s in by]
return DataFrameGroupBy(df_or_s, col_by, as_index=as_index)
if isinstance(df_or_s, Series):
+ if not isinstance(by[0], Series):
+ raise KeyError(by[0])
col = df_or_s # type: Series
anchor = df_or_s._kdf
col_by = [_resolve_col(anchor, col_or_s) for col_or_s in by] | 1 | #
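# Editor's illustrative note, not part of the original koalas source: the patch above
# makes Series.groupby reject a plain (non-Series) key, mirroring pandas, which raises
# KeyError when a Series is grouped by a label it cannot resolve. A hypothetical repro:
#
#   >>> import databricks.koalas as ks
#   >>> ks.Series([1, 2, 3]).groupby('x')
#   Traceback (most recent call last):
#     ...
#   KeyError: 'x'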
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class to be monkey-patched to DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
import warnings
from collections import Counter
from collections.abc import Iterable
from distutils.version import LooseVersion
from functools import reduce
import numpy as np
import pandas as pd
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.readwriter import OptionUtils
from pyspark.sql.types import DataType, DoubleType, FloatType
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.indexing import AtIndexer, ILocIndexer, LocIndexer
from databricks.koalas.internal import _InternalFrame
from databricks.koalas.utils import validate_arguments_and_invoke_function, scol_for
from databricks.koalas.window import Rolling, Expanding
class _Frame(object):
"""
The base class for both DataFrame and Series.
"""
def __init__(self, internal: _InternalFrame):
self._internal = internal # type: _InternalFrame
# TODO: add 'axis' parameter
def cummin(self, skipna: bool = True):
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._cum(F.min, skipna) # type: ignore
# TODO: add 'axis' parameter
def cummax(self, skipna: bool = True):
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._cum(F.max, skipna) # type: ignore
# TODO: add 'axis' parameter
def cumsum(self, skipna: bool = True):
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._cum(F.sum, skipna) # type: ignore
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self, skipna: bool = True):
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
        .. note:: unlike pandas', Koalas emulates cumulative product with the ``exp(sum(log(...)))``
            trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
        Exception : If the values are equal to or lower than 0.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
        By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
from pyspark.sql.functions import pandas_udf
def cumprod(scol):
@pandas_udf(returnType=self._kdf._internal.spark_type_for(self.name))
def negative_check(s):
assert len(s) == 0 or ((s > 0) | (s.isnull())).all(), \
"values should be bigger than 0: %s" % s
return s
return F.sum(F.log(negative_check(scol)))
return self._cum(cumprod, skipna) # type: ignore
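    # Editor's illustrative note, not part of the original koalas source: the docstring
    # above describes emulating cumulative product with the exp(sum(log(...))) identity,
    # which is why non-positive values are rejected. A minimal NumPy check of that
    # identity for positive inputs:
    #
    #   >>> import numpy as np
    #   >>> s = np.array([2.0, 3.0, 4.0])
    #   >>> bool(np.allclose(np.exp(np.cumsum(np.log(s))), np.cumprod(s)))
    #   True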
def get_dtype_counts(self):
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ks.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = self.dtypes
return pd.Series(dict(Counter([d.name for d in list(dtypes)])))
def pipe(self, func, *args, **kwargs):
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ks.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
        You can use lambda as well
>>> ks.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self):
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
"""
return self.to_pandas().values
def to_csv(self, path=None, sep=',', na_rep='', columns=None, header=True,
quotechar='"', date_format=None, escapechar=None, num_files=None,
**options):
r"""
Write object to a comma-separated values (csv) file.
.. note:: Koalas `to_csv` writes files to a path or URI. Unlike pandas', Koalas
            respects HDFS's properties such as 'fs.default.name'.
.. note:: Koalas writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
options: keyword arguments for additional options specific to PySpark.
            These kwargs are specific to PySpark's CSV options to pass. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ks.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ks.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
"""
if path is None:
# If path is none, just collect and use pandas's to_csv.
kdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and \
isinstance(self, ks.Series):
# 0.23 seems not having 'columns' parameter in Series' to_csv.
return kdf_or_ser.to_pandas().to_csv(
None, sep=sep, na_rep=na_rep, header=header,
date_format=date_format, index=False)
else:
return kdf_or_ser.to_pandas().to_csv(
None, sep=sep, na_rep=na_rep, columns=columns,
header=header, quotechar=quotechar,
date_format=date_format, escapechar=escapechar, index=False)
kdf = self
if isinstance(self, ks.Series):
kdf = self.to_frame()
if columns is None:
column_index = kdf._internal.column_index
elif isinstance(columns, str):
column_index = [(columns,)]
elif isinstance(columns, tuple):
column_index = [columns]
else:
column_index = [idx if isinstance(idx, tuple) else (idx,) for idx in columns]
if header is True and kdf._internal.column_index_level > 1:
raise ValueError('to_csv only support one-level index column now')
elif isinstance(header, list):
sdf = kdf._sdf.select(
[self._internal.scol_for(idx).alias(new_name)
for (idx, new_name) in zip(column_index, header)])
header = True
else:
sdf = kdf._sdf.select([kdf._internal.scol_for(idx) for idx in column_index])
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode("overwrite")
OptionUtils._set_opts(
builder,
path=path, sep=sep, nullValue=na_rep, header=header,
quote=quotechar, dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar)
builder.options(**options).format("csv").save(path)
def to_json(self, path=None, compression='uncompressed', num_files=None, **options):
"""
Convert the object to a JSON string.
.. note:: Koalas `to_json` writes files to a path or URI. Unlike pandas', Koalas
            respects HDFS's properties such as 'fs.default.name'.
.. note:: Koalas writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
        .. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
options: keyword arguments for additional options specific to PySpark.
It is specific to PySpark's JSON options to pass. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Examples
--------
>>> df = ks.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ks.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ks.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1
0 a
1 c
"""
if path is None:
# If path is none, just collect and use pandas's to_json.
kdf_or_ser = self
pdf = kdf_or_ser.to_pandas()
if isinstance(self, ks.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient='records')
kdf = self
if isinstance(self, ks.Series):
kdf = self.to_frame()
sdf = kdf.to_spark()
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode("overwrite")
OptionUtils._set_opts(builder, compression=compression)
builder.options(**options).format("json").save(path)
def to_excel(self, excel_writer, sheet_name="Sheet1", na_rep="", float_format=None,
columns=None, header=True, index=True, index_label=None, startrow=0,
startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep="inf",
verbose=True, freeze_panes=None):
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ks.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
if isinstance(self, ks.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ks.Series):
f = pd.Series.to_excel
else:
raise TypeError('Constructor expects DataFrame or Series; however, '
'got [%s]' % (self,))
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_excel, f, args)
def mean(self, axis=None, numeric_only=True):
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
Name: 0, dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
return self._reduce_for_stat_function(
F.mean, name="mean", numeric_only=numeric_only, axis=axis)
def sum(self, axis=None, numeric_only=True):
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.6
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.2
2 3.3
3 0.0
Name: 0, dtype: float64
On a Series:
>>> df['a'].sum()
6.0
"""
return self._reduce_for_stat_function(
F.sum, name="sum", numeric_only=numeric_only, axis=axis)
def skew(self, axis=None, numeric_only=True):
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
return self._reduce_for_stat_function(
F.skewness, name="skew", numeric_only=numeric_only, axis=axis)
def kurtosis(self, axis=None, numeric_only=True):
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
return self._reduce_for_stat_function(
F.kurtosis, name="kurtosis", numeric_only=numeric_only, axis=axis)
kurt = kurtosis
def min(self, axis=None, numeric_only=False):
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
Name: 0, dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
return self._reduce_for_stat_function(
F.min, name="min", numeric_only=numeric_only, axis=axis)
def max(self, axis=None, numeric_only=False):
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
Name: 0, dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
return self._reduce_for_stat_function(
F.max, name="max", numeric_only=numeric_only, axis=axis)
def std(self, axis=None, numeric_only=True):
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
Name: 0, dtype: float64
On a Series:
>>> df['a'].std()
1.0
"""
return self._reduce_for_stat_function(
F.stddev, name="std", numeric_only=numeric_only, axis=axis)
def var(self, axis=None, numeric_only=True):
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
Name: 0, dtype: float64
On a Series:
>>> df['a'].var()
1.0
"""
return self._reduce_for_stat_function(
F.variance, name="var", numeric_only=numeric_only, axis=axis)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ks.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ks.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
3
"""
return len(self) # type: ignore
def abs(self):
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ks.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
Name: 0, dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ks.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
# TODO: The first example above should not have "Name: 0".
return _spark_col_apply(self, F.abs)
# TODO: the `by` argument only supports grouping by name, and only `as_index` is supported for now.
# Documentation should be updated when more options are supported.
def groupby(self, by, as_index: bool = True):
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
koalas.groupby.GroupBy
Examples
--------
>>> df = ks.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean()
Animal Max Speed
0 Falcon 375.0
1 Parrot 25.0
"""
from databricks.koalas.frame import DataFrame
from databricks.koalas.series import Series
from databricks.koalas.groupby import DataFrameGroupBy, SeriesGroupBy
df_or_s = self
if isinstance(by, str):
by = [(by,)]
elif isinstance(by, tuple):
by = [by]
elif isinstance(by, Series):
by = [by]
elif isinstance(by, Iterable):
by = [key if isinstance(key, (tuple, Series)) else (key,) for key in by]
else:
raise ValueError('Not a valid index: TODO')
if not len(by):
raise ValueError('No group keys passed!')
if isinstance(df_or_s, DataFrame):
df = df_or_s # type: DataFrame
col_by = [_resolve_col(df, col_or_s) for col_or_s in by]
return DataFrameGroupBy(df_or_s, col_by, as_index=as_index)
if isinstance(df_or_s, Series):
col = df_or_s # type: Series
anchor = df_or_s._kdf
col_by = [_resolve_col(anchor, col_or_s) for col_or_s in by]
return SeriesGroupBy(col, col_by, as_index=as_index)
raise TypeError('Constructor expects DataFrame or Series; however, '
'got [%s]' % (df_or_s,))
def bool(self):
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Examples
--------
>>> ks.DataFrame({'a': [True]}).bool()
True
>>> ks.Series([False]).bool()
False
If the object contains non-boolean or multiple values, it raises an exception in all
cases as below.
>>> ks.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ks.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ks.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ks.DataFrame):
df = self
elif isinstance(self, ks.Series):
df = self.to_dataframe()
else:
raise TypeError('bool() expects DataFrame or Series; however, '
'got [%s]' % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self):
"""
Retrieves the index of the first valid value.
Returns
-------
idx_first_valid : type of index
Examples
--------
Support for DataFrame
>>> kdf = ks.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> kdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> kdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> kdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> kdf.first_valid_index()
'W'
Support for Series.
>>> s = ks.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
Name: 0, dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ks.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
Name: 0, dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
sdf = self._internal.sdf
column_scols = self._internal.column_scols
cond = reduce(lambda x, y: x & y,
map(lambda x: x.isNotNull(), column_scols))
first_valid_row = sdf.where(cond).first()
first_valid_idx = tuple(first_valid_row[idx_col]
for idx_col in self._internal.index_columns)
if len(first_valid_idx) == 1:
first_valid_idx = first_valid_idx[0]
return first_valid_idx
def median(self, accuracy=10000):
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in Koalas is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ks.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
Name: 0, dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['a'] + 100).median()
125.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
Name: 0, dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('x', 'a')] + 100).median()
125.0
"""
if not isinstance(accuracy, int):
raise ValueError("accuracy must be an integer; however, got [%s]" % type(accuracy))
from databricks.koalas.frame import DataFrame
from databricks.koalas.series import Series, _col
kdf_or_kser = self
if isinstance(kdf_or_kser, Series):
kser = _col(kdf_or_kser.to_frame())
return kser._reduce_for_stat_function(
lambda _: F.expr("approx_percentile(`%s`, 0.5, %s)"
% (kser._internal.data_columns[0], accuracy)),
name="median")
assert isinstance(kdf_or_kser, DataFrame)
# This code path cannot reuse `_reduce_for_stat_function` since there appears to be no proper
# way to get a column name from a Spark column, but we need the name to pass through `expr`.
kdf = kdf_or_kser
sdf = kdf._sdf.select(kdf._internal.scols)
median = lambda name: F.expr("approx_percentile(`%s`, 0.5, %s)" % (name, accuracy))
sdf = sdf.select([median(col).alias(col) for col in kdf._internal.data_columns])
# Attach a dummy column for index to avoid default index.
sdf = sdf.withColumn('__DUMMY__', F.monotonically_increasing_id())
# This is expected to be small so it's fine to transpose.
return DataFrame(kdf._internal.copy(
sdf=sdf,
index_map=[('__DUMMY__', None)],
column_scols=[scol_for(sdf, col) for col in kdf._internal.data_columns])) \
._to_internal_pandas().transpose().iloc[:, 0]
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(self, window, min_periods=None):
"""
Provide rolling transformations.
.. note:: 'min_periods' in Koalas works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/databricks/koalas/pull/607
def expanding(self, min_periods=1):
"""
Provide expanding transformations.
.. note:: 'min_periods' in Koalas works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
return Expanding(self, min_periods=min_periods)
@property
def at(self):
return AtIndexer(self)
at.__doc__ = AtIndexer.__doc__
@property
def iloc(self):
return ILocIndexer(self)
iloc.__doc__ = ILocIndexer.__doc__
@property
def loc(self):
return LocIndexer(self)
loc.__doc__ = LocIndexer.__doc__
def compute(self):
"""Alias of `to_pandas()` to mimic dask for easily porting tests."""
return self.toPandas()
@staticmethod
def _count_expr(col: spark.Column, spark_type: DataType) -> spark.Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas Pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(col, F.lit(None)))
else:
return F.count(col)
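# Example: for a float column holding [1.0, float('nan')], F.count(col) returns 2
# because Spark's count treats NaN as a non-null value, whereas
# F.count(F.nanvl(col, F.lit(None))) maps NaN to null first and returns 1,
# matching pandas' count().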
def _resolve_col(kdf, col_like):
if isinstance(col_like, ks.Series):
if kdf is not col_like._kdf:
raise ValueError(
"Cannot combine the series because it comes from a different dataframe. "
"In order to allow this operation, enable 'compute.ops_on_diff_frames' option.")
return col_like
elif isinstance(col_like, tuple):
return kdf[col_like]
else:
raise ValueError(col_like)
def _spark_col_apply(kdf_or_kser, sfun):
"""
Applies a function to all cells of a dataframe, the function being a known SQL function.
"""
from databricks.koalas.frame import DataFrame
from databricks.koalas.series import Series
if isinstance(kdf_or_kser, Series):
kser = kdf_or_kser
return kser._with_new_scol(sfun(kser._scol))
assert isinstance(kdf_or_kser, DataFrame)
kdf = kdf_or_kser
sdf = kdf._sdf
sdf = sdf.select([sfun(kdf._internal.scol_for(col)).alias(col) for col in kdf.columns])
return DataFrame(sdf)
| 1 | 13,343 | I think you should remove `if isinstance(by, str):`. We should also fix the error message `raise ValueError('Not a valid index: TODO')` to match pandas' | databricks-koalas | py
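A rough, hypothetical sketch of the key normalization the comment suggests (not the actual koalas change, and omitting the `Series` case): the dedicated `str` branch can be dropped, but a bare string label still has to end up wrapped as a single key, since `str` is itself `Iterable`; the `'Not a valid index: TODO'` message would likewise be reworded along pandas' lines (not shown here).

from collections.abc import Iterable

def normalize_by(by):
    # Hypothetical helper, for illustration only -- not the actual koalas fix.
    # Without excluding str from the Iterable branch, 'Animal' would be split
    # into single characters.
    if isinstance(by, tuple):
        return [by]
    if isinstance(by, Iterable) and not isinstance(by, str):
        return [key if isinstance(key, tuple) else (key,) for key in by]
    return [(by,)]

print(normalize_by('Animal'))                 # [('Animal',)]
print(normalize_by(['Animal', 'Max Speed']))  # [('Animal',), ('Max Speed',)]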
@@ -48,6 +48,7 @@ namespace OpenTelemetry.Instrumentation.Grpc.Implementation
activity.SetKind(ActivityKind.Client);
activity.DisplayName = grpcMethod?.Trim('/');
+ activity.SetCustomProperty("GrpcHandler.Request", request);
this.activitySource.Start(activity);
| 1 | // <copyright file="GrpcClientDiagnosticListener.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Diagnostics;
using System.Net.Http;
using OpenTelemetry.Trace;
namespace OpenTelemetry.Instrumentation.Grpc.Implementation
{
internal class GrpcClientDiagnosticListener : ListenerHandler
{
private readonly ActivitySourceAdapter activitySource;
private readonly PropertyFetcher startRequestFetcher = new PropertyFetcher("Request");
public GrpcClientDiagnosticListener(ActivitySourceAdapter activitySource)
: base("Grpc.Net.Client")
{
if (activitySource == null)
{
throw new ArgumentNullException(nameof(activitySource));
}
this.activitySource = activitySource;
}
public override void OnStartActivity(Activity activity, object payload)
{
if (!(this.startRequestFetcher.Fetch(payload) is HttpRequestMessage request))
{
GrpcInstrumentationEventSource.Log.NullPayload(nameof(GrpcClientDiagnosticListener), nameof(this.OnStartActivity));
return;
}
var grpcMethod = GrpcTagHelper.GetGrpcMethodFromActivity(activity);
activity.SetKind(ActivityKind.Client);
activity.DisplayName = grpcMethod?.Trim('/');
this.activitySource.Start(activity);
if (activity.IsAllDataRequested)
{
activity.SetTag(SemanticConventions.AttributeRpcSystem, GrpcTagHelper.RpcSystemGrpc);
if (GrpcTagHelper.TryParseRpcServiceAndRpcMethod(grpcMethod, out var rpcService, out var rpcMethod))
{
activity.SetTag(SemanticConventions.AttributeRpcService, rpcService);
activity.SetTag(SemanticConventions.AttributeRpcMethod, rpcMethod);
}
var uriHostNameType = Uri.CheckHostName(request.RequestUri.Host);
if (uriHostNameType == UriHostNameType.IPv4 || uriHostNameType == UriHostNameType.IPv6)
{
activity.SetTag(SemanticConventions.AttributeNetPeerIp, request.RequestUri.Host);
}
else
{
activity.SetTag(SemanticConventions.AttributeNetPeerName, request.RequestUri.Host);
}
activity.SetTag(SemanticConventions.AttributeNetPeerPort, request.RequestUri.Port.ToString());
}
}
public override void OnStopActivity(Activity activity, object payload)
{
if (activity.IsAllDataRequested)
{
activity.SetStatus(GrpcTagHelper.GetGrpcStatusCodeFromActivity(activity));
}
this.activitySource.Stop(activity);
}
}
}
| 1 | 16,358 | Curious - do we want this to be `OTel.GrpcHandler.Request`? For example, if someone is debugging a crash dump, they would get some hint that it was added by OpenTelemetry. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -52,6 +52,12 @@ public interface TableOperations {
* Implementations must check that the base metadata is current to avoid overwriting updates.
* Once the atomic commit operation succeeds, implementations must not perform any operations that
* may fail because failure in this method cannot be distinguished from commit failure.
+ * <p></p>
+ * Implementations must throw a CommitStateUnknownException in cases where it cannot be determined if the
+ * commit succeeded or failed. For example if a network partition causes the confirmation of the commit to be lost,
+ * the implementation should throw a CommitStateUnknownException. This is important because downstream users of
+ * this API need to know whether they can clean up the commit or not, if the state is unknown then it is not safe
+ * to remove any files. All other exceptions will be treated as if the commit has failed.
*
* @param base table metadata on which changes were based
* @param metadata new table metadata with updates | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.UUID;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.encryption.PlaintextEncryptionManager;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
/**
* SPI interface to abstract table metadata access and updates.
*/
public interface TableOperations {
/**
* Return the currently loaded table metadata, without checking for updates.
*
* @return table metadata
*/
TableMetadata current();
/**
* Return the current table metadata after checking for updates.
*
* @return table metadata
*/
TableMetadata refresh();
/**
* Replace the base table metadata with a new version.
* <p>
* This method should implement and document atomicity guarantees.
* <p>
* Implementations must check that the base metadata is current to avoid overwriting updates.
* Once the atomic commit operation succeeds, implementations must not perform any operations that
* may fail because failure in this method cannot be distinguished from commit failure.
*
* @param base table metadata on which changes were based
* @param metadata new table metadata with updates
*/
void commit(TableMetadata base, TableMetadata metadata);
/**
* Returns a {@link FileIO} to read and write table data and metadata files.
*/
FileIO io();
/**
* Returns a {@link org.apache.iceberg.encryption.EncryptionManager} to encrypt and decrypt data files.
*/
default EncryptionManager encryption() {
return new PlaintextEncryptionManager();
}
/**
* Given the name of a metadata file, obtain the full path of that file using an appropriate base
* location of the implementation's choosing.
* <p>
* The file may not exist yet, in which case the path should be returned as if it were to be created
* by e.g. {@link FileIO#newOutputFile(String)}.
*/
String metadataFileLocation(String fileName);
/**
* Returns a {@link LocationProvider} that supplies locations for new data files.
*
* @return a location provider configured for the current table state
*/
LocationProvider locationProvider();
/**
* Return a temporary {@link TableOperations} instance that uses configuration from uncommitted metadata.
* <p>
* This is called by transactions when uncommitted table metadata should be used; for example, to create a metadata
* file location based on metadata in the transaction that has not been committed.
* <p>
* Transactions will not call {@link #refresh()} or {@link #commit(TableMetadata, TableMetadata)}.
*
* @param uncommittedMetadata uncommitted table metadata
* @return a temporary table operations that behaves like the uncommitted metadata is current
*/
default TableOperations temp(TableMetadata uncommittedMetadata) {
return this;
}
/**
* Create a new ID for a Snapshot
*
* @return a long snapshot ID
*/
default long newSnapshotId() {
UUID uuid = UUID.randomUUID();
long mostSignificantBits = uuid.getMostSignificantBits();
long leastSignificantBits = uuid.getLeastSignificantBits();
return Math.abs(mostSignificantBits ^ leastSignificantBits);
}
}
| 1 | 34,901 | nit: unnecessary `</p>` | apache-iceberg | java |
@@ -194,7 +194,16 @@ std::shared_ptr<Engine> ADIOS::Open(const std::string &name,
"HDF5 library, can't use HDF5\n");
#endif
}
-
+ else if (type == "HDF5Reader") // -Junmin
+ {
+#if defined(ADIOS_HAVE_PHDF5) && defined(ADIOS_HAVE_MPI)
+ return std::make_shared<HDF5Reader>(*this, name, accessMode, mpiComm,
+ method);
+#else
+ throw std::invalid_argument("ERROR: this version didn't compile with "
+ "HDF5 library, can't use HDF5\n");
+#endif
+ }
else
{
if (m_DebugMode == true) | 1 | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* ADIOS.cpp
*
* Created on: Sep 29, 2016
* Author: William F Godoy
*/
#include "ADIOS.h"
#include "ADIOS.tcc"
#include <fstream>
#include <ios> //std::ios_base::failure
#include <iostream>
#include <sstream>
#include <utility>
#include "adios2/ADIOSMPI.h"
#include "adios2/ADIOSMacros.h"
#include "adios2/core/adiosFunctions.h"
#include "adios2/engine/bp/BPFileReader.h"
#include "adios2/engine/bp/BPFileWriter.h"
#ifdef ADIOS2_HAVE_DATAMAN // external dependencies
#include "adios2/engine/dataman/DataManReader.h"
#include "adios2/engine/dataman/DataManWriter.h"
#endif
#ifdef ADIOS2_HAVE_ADIOS1 // external dependencies
#include "adios2/engine/adios1/ADIOS1Reader.h"
#include "adios2/engine/adios1/ADIOS1Writer.h"
#endif
#ifdef ADIOS2_HAVE_HDF5 // external dependencies
#include "adios2/engine/hdf5/HDF5ReaderP.h"
#include "adios2/engine/hdf5/HDF5WriterP.h"
#endif
namespace adios
{
ADIOS::ADIOS(const Verbose verbose, const bool debugMode)
: ADIOS("", MPI_COMM_SELF, verbose, debugMode)
{
}
ADIOS::ADIOS(const std::string config, const Verbose verbose,
const bool debugMode)
: ADIOS(config, MPI_COMM_SELF, verbose, debugMode)
{
}
ADIOS::ADIOS(const std::string configFile, MPI_Comm mpiComm,
const Verbose verbose, const bool debugMode)
: m_MPIComm(mpiComm), m_ConfigFile(configFile), m_DebugMode(debugMode)
{
InitMPI();
// InitXML( m_XMLConfigFile, m_MPIComm, m_DebugMode, m_HostLanguage,
// m_Transforms, m_Groups );
}
ADIOS::ADIOS(MPI_Comm mpiComm, const Verbose verbose, const bool debugMode)
: ADIOS("", mpiComm, verbose, debugMode)
{
}
// ADIOS::~ADIOS() {}
void ADIOS::InitMPI()
{
if (m_DebugMode == true)
{
if (m_MPIComm == MPI_COMM_NULL)
{
throw std::ios_base::failure(
"ERROR: engine communicator is MPI_COMM_NULL,"
" in call to ADIOS Open or Constructor\n");
}
}
MPI_Comm_rank(m_MPIComm, &m_RankMPI);
MPI_Comm_size(m_MPIComm, &m_SizeMPI);
}
Method &ADIOS::DeclareMethod(const std::string methodName)
{
if (m_DebugMode == true)
{
if (m_Methods.count(methodName) == 1)
{
throw std::invalid_argument(
"ERROR: method " + methodName +
" already declared, from DeclareMethod\n");
}
}
m_Methods.emplace(methodName, Method(methodName, m_DebugMode));
return m_Methods.at(methodName);
}
std::shared_ptr<Engine> ADIOS::Open(const std::string &name,
const std::string accessMode,
MPI_Comm mpiComm, const Method &method)
{
if (m_DebugMode == true)
{
if (m_EngineNames.count(name) == 1) // Check if Engine already exists
{
throw std::invalid_argument(
"ERROR: engine name " + name +
" already created by Open, in call from Open.\n");
}
}
m_EngineNames.insert(name);
const std::string type(method.m_Type);
const bool isDefaultWriter =
(accessMode == "w" || accessMode == "write" || accessMode == "a" ||
accessMode == "append") &&
type.empty()
? true
: false;
const bool isDefaultReader =
(accessMode == "r" || accessMode == "read") && type.empty() ? true
: false;
if (isDefaultWriter || type == "BPFileWriter" || type == "bpfilewriter")
{
return std::make_shared<BPFileWriter>(*this, name, accessMode, mpiComm,
method);
}
else if (isDefaultReader || type == "BPReader" || type == "bpreader")
{
return std::make_shared<BPFileReader>(*this, name, accessMode, mpiComm,
method);
}
else if (type == "SIRIUS" || type == "sirius" || type == "Sirius")
{
// not yet supported
// return std::make_shared<engine::DataMan>( *this, name, accessMode,
// mpiComm, method, iomode, timeout_sec, m_DebugMode, method.m_nThreads
// );
}
else if (type == "DataManWriter")
{
#ifdef ADIOS2_HAVE_DATAMAN
return std::make_shared<DataManWriter>(*this, name, accessMode, mpiComm,
method);
#else
throw std::invalid_argument(
"ERROR: this version didn't compile with "
"Dataman library, can't Open DataManWriter\n");
#endif
}
else if (type == "DataManReader")
{
#ifdef ADIOS2_HAVE_DATAMAN
return std::make_shared<DataManReader>(*this, name, accessMode, mpiComm,
method);
#else
throw std::invalid_argument(
"ERROR: this version didn't compile with "
"Dataman library, can't Open DataManReader\n");
#endif
}
else if (type == "ADIOS1Writer")
{
#ifdef ADIOS2_HAVE_ADIOS1
return std::make_shared<ADIOS1Writer>(*this, name, accessMode, mpiComm,
method);
#else
throw std::invalid_argument(
"ERROR: this version didn't compile with ADIOS "
"1.x library, can't Open ADIOS1Writer\n");
#endif
}
else if (type == "Vis")
{
// return std::make_shared<Vis>( *this, name, accessMode, mpiComm,
// method,
// iomode, timeout_sec, m_DebugMode, method.m_nThreads );
}
else if (type == "HDF5Writer") // -junmin
{
#ifdef ADIOS2_HAVE_HDF5
return std::make_shared<HDF5Writer>(*this, name, accessMode, mpiComm,
method);
#else
throw std::invalid_argument("ERROR: this version didn't compile with "
"HDF5 library, can't use HDF5\n");
#endif
}
else
{
if (m_DebugMode == true)
{
throw std::invalid_argument("ERROR: method type " + type +
" not supported for " + name +
", in call to Open\n");
}
}
return nullptr; // if debug mode is off
}
std::shared_ptr<Engine> ADIOS::Open(const std::string &name,
const std::string accessMode,
const Method &method)
{
return Open(name, accessMode, m_MPIComm, method);
}
std::shared_ptr<Engine> ADIOS::Open(const std::string &name,
const std::string accessMode,
MPI_Comm mpiComm,
const std::string methodName)
{
auto itMethod = m_Methods.find(methodName);
if (m_DebugMode == true)
{
CheckMethod(itMethod, methodName, " in call to Open\n");
}
return Open(name, accessMode, mpiComm, itMethod->second);
}
std::shared_ptr<Engine> ADIOS::Open(const std::string &name,
const std::string accessMode,
const std::string methodName)
{
return Open(name, accessMode, m_MPIComm, methodName);
}
std::shared_ptr<Engine> ADIOS::OpenFileReader(const std::string &fileName,
MPI_Comm mpiComm,
const Method &method)
{
return Open(fileName, "r", mpiComm, method);
}
std::shared_ptr<Engine> ADIOS::OpenFileReader(const std::string &name,
MPI_Comm mpiComm,
const std::string methodName)
{
auto itMethod = m_Methods.find(methodName);
if (m_DebugMode == true)
{
CheckMethod(itMethod, methodName, " in call to Open\n");
}
return Open(name, "r", m_MPIComm, itMethod->second);
}
VariableCompound &ADIOS::GetVariableCompound(const std::string &name)
{
return m_Compound.at(GetVariableIndex<void>(name));
}
void ADIOS::MonitorVariables(std::ostream &logStream)
{
logStream << "\tVariable \t Type\n";
for (auto &variablePair : m_Variables)
{
const std::string name(variablePair.first);
const std::string type(variablePair.second.first);
if (type == GetType<char>())
{
GetVariable<char>(name).Monitor(logStream);
}
else if (type == GetType<unsigned char>())
{
GetVariable<unsigned char>(name).Monitor(logStream);
}
else if (type == GetType<short>())
{
GetVariable<short>(name).Monitor(logStream);
}
else if (type == GetType<unsigned short>())
{
GetVariable<unsigned short>(name).Monitor(logStream);
}
else if (type == GetType<int>())
{
GetVariable<int>(name).Monitor(logStream);
}
else if (type == GetType<unsigned int>())
{
GetVariable<unsigned int>(name).Monitor(logStream);
}
else if (type == GetType<long int>())
{
GetVariable<long int>(name).Monitor(logStream);
}
else if (type == GetType<unsigned long int>())
{
GetVariable<unsigned long int>(name).Monitor(logStream);
}
else if (type == GetType<long long int>())
{
GetVariable<long long int>(name).Monitor(logStream);
}
else if (type == GetType<unsigned long long int>())
{
GetVariable<unsigned long long int>(name).Monitor(logStream);
}
else if (type == GetType<float>())
{
GetVariable<float>(name).Monitor(logStream);
}
else if (type == GetType<double>())
{
GetVariable<double>(name).Monitor(logStream);
}
else if (type == GetType<long double>())
{
GetVariable<long double>(name).Monitor(logStream);
}
else if (type == GetType<std::complex<float>>())
{
GetVariable<std::complex<float>>(name).Monitor(logStream);
}
else if (type == GetType<std::complex<double>>())
{
GetVariable<std::complex<double>>(name).Monitor(logStream);
}
else if (type == GetType<std::complex<long double>>())
{
GetVariable<std::complex<long double>>(name).Monitor(logStream);
}
}
}
// PRIVATE FUNCTIONS BELOW
void ADIOS::CheckVariableInput(const std::string &name,
const Dims &dimensions) const
{
if (m_DebugMode == true)
{
if (m_Variables.count(name) == 1)
{
throw std::invalid_argument(
"ERROR: variable " + name +
" already exists, in call to DefineVariable\n");
}
if (dimensions.empty() == true)
{
throw std::invalid_argument(
"ERROR: variable " + name +
" dimensions can't be empty, in call to DefineVariable\n");
}
}
}
void ADIOS::CheckVariableName(
std::map<std::string, std::pair<std::string, unsigned int>>::const_iterator
itVariable,
const std::string &name, const std::string hint) const
{
if (m_DebugMode == true)
{
if (itVariable == m_Variables.end())
{
throw std::invalid_argument("ERROR: variable " + name +
" does not exist " + hint + "\n");
}
}
}
void ADIOS::CheckMethod(std::map<std::string, Method>::const_iterator itMethod,
const std::string methodName,
const std::string hint) const
{
if (itMethod == m_Methods.end())
{
throw std::invalid_argument("ERROR: method " + methodName +
" not found " + hint + "\n");
}
}
//------------------------------------------------------------------------------
// Explicitly instantiate the necessary template implementations
#define define_template_instantiation(T) \
template Variable<T> &ADIOS::DefineVariable<T>( \
const std::string &, const Dims, const Dims, const Dims); \
\
template Variable<T> &ADIOS::GetVariable<T>(const std::string &);
ADIOS_FOREACH_TYPE_1ARG(define_template_instantiation)
template unsigned int ADIOS::GetVariableIndex<void>(const std::string &);
#undef define_template_instantiation
//------------------------------------------------------------------------------
} // end namespace adios
| 1 | 11,457 | Should just be `#ifdef ADIOS2_HAVE_HDF5` | ornladios-ADIOS2 | cpp |
@@ -1691,7 +1691,11 @@ def add_default_resource_props(
props["LogGroupName"] = resource_name
elif res_type == "AWS::Lambda::Function" and not props.get("FunctionName"):
- props["FunctionName"] = "{}-lambda-{}".format(stack_name[:45], short_uid())
+ # FunctionName is up to 64 characters long
+ random_id_part = short_uid()
+ resource_id_part = resource_id[:24]
+ stack_name_part = stack_name[: 63 - 2 - (len(random_id_part) + len(resource_id_part))]
+ props["FunctionName"] = f"{stack_name_part}-{resource_id_part}-{random_id_part}"
elif res_type == "AWS::SNS::Topic" and not props.get("TopicName"):
props["TopicName"] = "topic-%s" % short_uid() | 1 | import base64
import copy
import json
import logging
import re
import traceback
from typing import Optional
from urllib.parse import urlparse
import botocore
from moto.cloudformation import parsing
from moto.core import CloudFormationModel as MotoCloudFormationModel
from moto.ec2.utils import generate_route_id
from six import iteritems
from localstack.constants import FALSE_STRINGS, S3_STATIC_WEBSITE_HOSTNAME, TEST_AWS_ACCOUNT_ID
from localstack.services.cloudformation.deployment_utils import (
PLACEHOLDER_AWS_NO_VALUE,
PLACEHOLDER_RESOURCE_NAME,
dump_json_params,
param_defaults,
remove_none_values,
select_parameters,
)
from localstack.services.cloudformation.service_models import (
KEY_RESOURCE_STATE,
DependencyNotYetSatisfied,
GenericBaseModel,
)
from localstack.services.s3 import s3_listener
from localstack.utils import common
from localstack.utils.aws import aws_stack
from localstack.utils.cloudformation import template_preparer
from localstack.utils.common import (
canonical_json,
get_all_subclasses,
json_safe,
md5,
prevent_stack_overflow,
short_uid,
start_worker_thread,
to_bytes,
to_str,
)
from localstack.utils.testutil import delete_all_s3_objects
ACTION_CREATE = "create"
ACTION_DELETE = "delete"
AWS_URL_SUFFIX = "localhost" # value is "amazonaws.com" in real AWS
IAM_POLICY_VERSION = "2012-10-17"
LOG = logging.getLogger(__name__)
# list of resource types that can be updated
# TODO: make this a property of the model classes themselves
UPDATEABLE_RESOURCES = [
"Lambda::Function",
"ApiGateway::Method",
"StepFunctions::StateMachine",
"IAM::Role",
"EC2::Instance",
]
# list of static attribute references to be replaced in {'Fn::Sub': '...'} strings
STATIC_REFS = ["AWS::Region", "AWS::Partition", "AWS::StackName", "AWS::AccountId"]
# maps resource type string to model class
RESOURCE_MODELS = {
model.cloudformation_type(): model for model in get_all_subclasses(GenericBaseModel)
}
class NoStackUpdates(Exception):
"""Exception indicating that no actions are to be performed in a stack update (which is not allowed)"""
pass
def lambda_get_params():
return lambda params, **kwargs: params
def rename_params(func, rename_map):
def do_rename(params, **kwargs):
values = func(params, **kwargs) if func else params
for old_param, new_param in rename_map.items():
values[new_param] = values.pop(old_param, None)
return values
return do_rename
def es_add_tags_params(params, **kwargs):
es_arn = aws_stack.es_domain_arn(params.get("DomainName"))
tags = params.get("Tags", [])
return {"ARN": es_arn, "TagList": tags}
def get_ddb_provisioned_throughput(params, **kwargs):
args = params.get("ProvisionedThroughput")
if args == PLACEHOLDER_AWS_NO_VALUE:
return {}
if args:
if isinstance(args["ReadCapacityUnits"], str):
args["ReadCapacityUnits"] = int(args["ReadCapacityUnits"])
if isinstance(args["WriteCapacityUnits"], str):
args["WriteCapacityUnits"] = int(args["WriteCapacityUnits"])
return args
def get_ddb_global_sec_indexes(params, **kwargs):
args = params.get("GlobalSecondaryIndexes")
if args:
for index in args:
provisoned_throughput = index["ProvisionedThroughput"]
if isinstance(provisoned_throughput["ReadCapacityUnits"], str):
provisoned_throughput["ReadCapacityUnits"] = int(
provisoned_throughput["ReadCapacityUnits"]
)
if isinstance(provisoned_throughput["WriteCapacityUnits"], str):
provisoned_throughput["WriteCapacityUnits"] = int(
provisoned_throughput["WriteCapacityUnits"]
)
return args
def get_ddb_kinesis_stream_specification(params, **kwargs):
args = params.get("KinesisStreamSpecification")
if args:
args["TableName"] = params["TableName"]
return args
# maps resource types to functions and parameters for creation
RESOURCE_TO_FUNCTION = {
"S3::BucketPolicy": {
"create": {
"function": "put_bucket_policy",
"parameters": rename_params(
dump_json_params(None, "PolicyDocument"), {"PolicyDocument": "Policy"}
),
}
},
"KinesisFirehose::DeliveryStream": {
"create": {
"function": "create_delivery_stream",
"parameters": select_parameters(
"DeliveryStreamName",
"DeliveryStreamType",
"S3DestinationConfiguration",
"ElasticsearchDestinationConfiguration",
),
},
"delete": {
"function": "delete_delivery_stream",
"parameters": {"DeliveryStreamName": "DeliveryStreamName"},
},
},
"Elasticsearch::Domain": {
"create": [
{
"function": "create_elasticsearch_domain",
"parameters": select_parameters(
"AccessPolicies",
"AdvancedOptions",
"CognitoOptions",
"DomainName",
"EBSOptions",
"ElasticsearchClusterConfig",
"ElasticsearchVersion",
"EncryptionAtRestOptions",
"LogPublishingOptions",
"NodeToNodeEncryptionOptions",
"SnapshotOptions",
"VPCOptions",
),
},
{"function": "add_tags", "parameters": es_add_tags_params},
],
"delete": {
"function": "delete_elasticsearch_domain",
"parameters": {"DomainName": "DomainName"},
},
},
"Lambda::Version": {
"create": {
"function": "publish_version",
"parameters": select_parameters("FunctionName", "CodeSha256", "Description"),
}
},
"Lambda::EventSourceMapping": {
"create": {
"function": "create_event_source_mapping",
"parameters": select_parameters(
"FunctionName",
"EventSourceArn",
"Enabled",
"StartingPosition",
"BatchSize",
"StartingPositionTimestamp",
),
}
},
"DynamoDB::Table": {
"create": [
{
"function": "create_table",
"parameters": {
"TableName": "TableName",
"AttributeDefinitions": "AttributeDefinitions",
"KeySchema": "KeySchema",
"ProvisionedThroughput": get_ddb_provisioned_throughput,
"LocalSecondaryIndexes": "LocalSecondaryIndexes",
"GlobalSecondaryIndexes": get_ddb_global_sec_indexes,
"StreamSpecification": lambda params, **kwargs: (
common.merge_dicts(
params.get("StreamSpecification"),
{"StreamEnabled": True},
default=None,
)
),
},
"defaults": {
"ProvisionedThroughput": {
"ReadCapacityUnits": 5,
"WriteCapacityUnits": 5,
}
},
},
{
"function": "enable_kinesis_streaming_destination",
"parameters": get_ddb_kinesis_stream_specification,
},
],
"delete": {
"function": "delete_table",
"parameters": {"TableName": "TableName"},
},
},
"IAM::Role": {
"create": {
"function": "create_role",
"parameters": param_defaults(
dump_json_params(
select_parameters(
"Path",
"RoleName",
"AssumeRolePolicyDocument",
"Description",
"MaxSessionDuration",
"PermissionsBoundary",
"Tags",
),
"AssumeRolePolicyDocument",
),
{"RoleName": PLACEHOLDER_RESOURCE_NAME},
),
},
"delete": {"function": "delete_role", "parameters": {"RoleName": "RoleName"}},
},
"ApiGateway::Method": {
"create": {
"function": "put_method",
"parameters": {
"restApiId": "RestApiId",
"resourceId": "ResourceId",
"httpMethod": "HttpMethod",
"authorizationType": "AuthorizationType",
"authorizerId": "AuthorizerId",
"requestParameters": "RequestParameters",
},
}
},
"ApiGateway::Method::Integration": {},
"ApiGateway::Account": {},
"ApiGateway::Model": {
"create": {
"function": "create_model",
"parameters": {
"name": "Name",
"restApiId": "RestApiId",
},
"defaults": {"contentType": "application/json"},
}
},
"ApiGateway::Deployment": {
"create": {
"function": "create_deployment",
"parameters": {
"restApiId": "RestApiId",
"stageName": "StageName",
"stageDescription": "StageDescription",
"description": "Description",
},
}
},
"ApiGateway::GatewayResponse": {
"create": {
"function": "put_gateway_response",
"parameters": {
"restApiId": "RestApiId",
"responseType": "ResponseType",
"statusCode": "StatusCode",
"responseParameters": "ResponseParameters",
"responseTemplates": "ResponseTemplates",
},
}
},
"StepFunctions::StateMachine": {
"create": {
"function": "create_state_machine",
"parameters": {
"name": ["StateMachineName", PLACEHOLDER_RESOURCE_NAME],
"definition": "DefinitionString",
"roleArn": lambda params, **kwargs: get_role_arn(params.get("RoleArn"), **kwargs),
},
},
"delete": {
"function": "delete_state_machine",
"parameters": {"stateMachineArn": "PhysicalResourceId"},
},
},
"StepFunctions::Activity": {
"create": {
"function": "create_activity",
"parameters": {"name": ["Name", PLACEHOLDER_RESOURCE_NAME], "tags": "Tags"},
},
"delete": {
"function": "delete_activity",
"parameters": {"activityArn": "PhysicalResourceId"},
},
},
"EC2::Instance": {
"create": {
"function": "create_instances",
"parameters": {
"InstanceType": "InstanceType",
"SecurityGroups": "SecurityGroups",
"KeyName": "KeyName",
"ImageId": "ImageId",
},
"defaults": {"MinCount": 1, "MaxCount": 1},
},
"delete": {
"function": "terminate_instances",
"parameters": {
"InstanceIds": lambda params, **kw: [
kw["resources"][kw["resource_id"]]["PhysicalResourceId"]
]
},
},
},
}
# ----------------
# UTILITY METHODS
# ----------------
def get_secret_arn(secret_name, account_id=None):
# TODO: create logic to create static without lookup table!
from localstack.services.secretsmanager import secretsmanager_starter
storage = secretsmanager_starter.SECRET_ARN_STORAGE
key = "%s_%s" % (aws_stack.get_region(), secret_name)
return storage.get(key) or storage.get(secret_name)
def retrieve_topic_arn(topic_name):
topics = aws_stack.connect_to_service("sns").list_topics()["Topics"]
topic_arns = [t["TopicArn"] for t in topics if t["TopicArn"].endswith(":%s" % topic_name)]
return topic_arns[0]
def get_role_arn(role_arn, **kwargs):
role_arn = resolve_refs_recursively(kwargs.get("stack_name"), role_arn, kwargs.get("resources"))
return aws_stack.role_arn(role_arn)
def find_stack(stack_name):
from localstack.services.cloudformation.cloudformation_api import find_stack as api_find_stack
return api_find_stack(stack_name)
# ---------------------
# CF TEMPLATE HANDLING
# ---------------------
def get_deployment_config(res_type):
result = RESOURCE_TO_FUNCTION.get(res_type)
if result is not None:
return result
canonical_type = canonical_resource_type(res_type)
resource_class = RESOURCE_MODELS.get(canonical_type)
if resource_class:
return resource_class.get_deploy_templates()
def get_resource_type(resource):
res_type = resource.get("ResourceType") or resource.get("Type") or ""
parts = res_type.split("::", 1)
if len(parts) == 1:
return parts[0]
return parts[1]
def get_service_name(resource):
res_type = resource.get("Type", resource.get("ResourceType", ""))
parts = res_type.split("::")
if len(parts) == 1:
return None
if res_type.endswith("Cognito::UserPool"):
return "cognito-idp"
if parts[-2] == "Cognito":
return "cognito-idp"
if parts[-2] == "Elasticsearch":
return "es"
if parts[-2] == "KinesisFirehose":
return "firehose"
if parts[-2] == "ResourceGroups":
return "resource-groups"
if parts[-2] == "CertificateManager":
return "acm"
return parts[1].lower()
def get_resource_name(resource):
properties = resource.get("Properties") or {}
name = properties.get("Name")
if name:
return name
# try to extract name via resource class
res_type = canonical_resource_type(get_resource_type(resource))
model_class = RESOURCE_MODELS.get(res_type)
if model_class:
instance = model_class(resource)
name = instance.get_resource_name()
if not name:
LOG.debug('Unable to extract name for resource type "%s"' % res_type)
return name
def get_client(resource, func_config):
resource_type = get_resource_type(resource)
service = get_service_name(resource)
resource_config = get_deployment_config(resource_type)
if resource_config is None:
raise Exception(
"CloudFormation deployment for resource type %s not yet implemented" % resource_type
)
try:
if func_config.get("boto_client") == "resource":
return aws_stack.connect_to_resource(service)
return aws_stack.connect_to_service(service)
except Exception as e:
LOG.warning('Unable to get client for "%s" API, skipping deployment: %s' % (service, e))
return None
def describe_stack_resource(stack_name, logical_resource_id):
client = aws_stack.connect_to_service("cloudformation")
try:
result = client.describe_stack_resource(
StackName=stack_name, LogicalResourceId=logical_resource_id
)
return result["StackResourceDetail"]
except Exception as e:
LOG.warning(
'Unable to get details for resource "%s" in CloudFormation stack "%s": %s'
% (logical_resource_id, stack_name, e)
)
def retrieve_resource_details(resource_id, resource_status, resources, stack_name):
resource = resources.get(resource_id)
resource_id = resource_status.get("PhysicalResourceId") or resource_id
if not resource:
resource = {}
resource_type = get_resource_type(resource)
resource_props = resource.get("Properties")
if resource_props is None:
raise Exception(
'Unable to find properties for resource "%s": %s %s'
% (resource_id, resource, resources)
)
try:
# try to look up resource class
canonical_type = canonical_resource_type(resource_type)
resource_class = RESOURCE_MODELS.get(canonical_type)
if resource_class:
instance = resource_class(resource)
state = instance.fetch_and_update_state(stack_name=stack_name, resources=resources)
return state
# special case for stack parameters
if resource_type == "Parameter":
return resource_props
# fallback: try accessing stack.moto_resource_statuses
stack = find_stack(stack_name)
moto_resource = stack.moto_resource_statuses.get(resource_id)
if moto_resource:
return moto_resource
# if is_deployable_resource(resource):
LOG.warning(
"Unexpected resource type %s when resolving references of resource %s: %s"
% (resource_type, resource_id, resource)
)
except DependencyNotYetSatisfied:
return
except Exception as e:
check_not_found_exception(e, resource_type, resource, resource_status)
return None
def check_not_found_exception(e, resource_type, resource, resource_status=None):
# we expect this to be a "not found" exception
markers = [
"NoSuchBucket",
"ResourceNotFound",
"NoSuchEntity",
"NotFoundException",
"404",
"not found",
"not exist",
]
if not list(filter(lambda marker, e=e: marker in str(e), markers)):
LOG.warning(
"Unexpected error retrieving details for resource type %s: Exception: %s - %s - status: %s"
% (resource_type, e, resource, resource_status)
)
return False
return True
def extract_resource_attribute(
resource_type,
resource_state,
attribute,
resource_id=None,
resource=None,
resources=None,
stack_name=None,
):
LOG.debug("Extract resource attribute: %s %s" % (resource_type, attribute))
is_ref_attribute = attribute in ["PhysicalResourceId", "Ref"]
is_ref_attr_or_arn = is_ref_attribute or attribute == "Arn"
resource = resource or {}
if not resource and resources:
resource = resources[resource_id]
if not resource_state:
resource_state = retrieve_resource_details(resource_id, {}, resources, stack_name) or {}
if not resource_state:
raise DependencyNotYetSatisfied(
resource_ids=resource_id,
message='Unable to fetch details for resource "%s" (attribute "%s")'
% (resource_id, attribute),
)
if isinstance(resource_state, MotoCloudFormationModel):
if is_ref_attribute:
res_phys_id = getattr(resource_state, "physical_resource_id", None)
get_res_phys_id = getattr(resource_state, "get_physical_resource_id", None)
if not res_phys_id and get_res_phys_id:
res_phys_id = get_res_phys_id(attribute)
if res_phys_id:
return res_phys_id
if hasattr(resource_state, "get_cfn_attribute"):
try:
return resource_state.get_cfn_attribute(attribute)
except Exception:
pass
raise Exception(
'Unable to extract attribute "%s" from "%s" model class %s'
% (attribute, resource_type, type(resource_state))
)
# extract resource specific attributes
resource_props = resource.get("Properties", {})
if resource_type == "Parameter":
result = None
param_value = resource_props.get(
"Value",
resource.get("Value", resource_props.get("Properties", {}).get("Value")),
)
if is_ref_attr_or_arn:
result = param_value
elif isinstance(param_value, dict):
result = param_value.get(attribute)
if result is not None:
return result
return ""
elif resource_type == "Lambda::Function":
func_configs = resource_state.get("Configuration") or {}
if is_ref_attr_or_arn:
func_arn = func_configs.get("FunctionArn")
if func_arn:
return resolve_refs_recursively(stack_name, func_arn, resources)
func_name = resolve_refs_recursively(
stack_name, func_configs.get("FunctionName"), resources
)
return aws_stack.lambda_function_arn(func_name)
else:
return func_configs.get(attribute)
elif resource_type == "Lambda::Version":
if resource_state.get("Version"):
return "%s:%s" % (
resource_state.get("FunctionArn"),
resource_state.get("Version").split(":")[-1],
)
elif resource_type == "DynamoDB::Table":
actual_attribute = "LatestStreamArn" if attribute == "StreamArn" else attribute
value = resource_state.get("Table", {}).get(actual_attribute)
if value:
return value
elif resource_type == "ApiGateway::RestApi":
if is_ref_attribute:
result = resource_state.get("id")
if result:
return result
if attribute == "RootResourceId":
api_id = resource_state["id"]
resources = aws_stack.connect_to_service("apigateway").get_resources(restApiId=api_id)[
"items"
]
for res in resources:
if res["path"] == "/" and not res.get("parentId"):
return res["id"]
elif resource_type == "ApiGateway::Resource":
if is_ref_attribute:
return resource_state.get("id")
elif resource_type == "ApiGateway::Deployment":
if is_ref_attribute:
return resource_state.get("id")
elif resource_type == "S3::Bucket":
if attribute == "WebsiteURL":
bucket_name = resource_props.get("BucketName")
return f"http://{bucket_name}.{S3_STATIC_WEBSITE_HOSTNAME}"
if is_ref_attr_or_arn:
bucket_name = resource_props.get("BucketName")
bucket_name = resolve_refs_recursively(stack_name, bucket_name, resources)
if attribute == "Arn":
return aws_stack.s3_bucket_arn(bucket_name)
return bucket_name
elif resource_type == "Elasticsearch::Domain":
if attribute == "DomainEndpoint":
domain_status = resource_state.get("DomainStatus", {})
result = domain_status.get("Endpoint")
if result:
return result
if attribute in ["Arn", "DomainArn"]:
domain_name = resource_props.get("DomainName") or resource_state.get("DomainName")
return aws_stack.es_domain_arn(domain_name)
elif resource_type == "StepFunctions::StateMachine":
if is_ref_attr_or_arn:
return resource_state["stateMachineArn"]
elif resource_type == "SNS::Topic":
if is_ref_attribute and resource_state.get("TopicArn"):
topic_arn = resource_state.get("TopicArn")
return resolve_refs_recursively(stack_name, topic_arn, resources)
elif resource_type == "SQS::Queue":
if is_ref_attr_or_arn:
if attribute == "Arn" and resource_state.get("QueueArn"):
return resolve_refs_recursively(
stack_name, resource_state.get("QueueArn"), resources
)
return aws_stack.get_sqs_queue_url(resource_props.get("QueueName"))
attribute_lower = common.first_char_to_lower(attribute)
result = resource_state.get(attribute) or resource_state.get(attribute_lower)
if result is None and isinstance(resource, dict):
result = resource_props.get(attribute) or resource_props.get(attribute_lower)
if result is None:
result = get_attr_from_model_instance(
resource,
attribute,
resource_type=resource_type,
resource_id=resource_id,
)
if is_ref_attribute:
for attr in ["Id", "PhysicalResourceId", "Ref"]:
if result is None:
for obj in [resource_state, resource]:
result = result or obj.get(attr)
return result
def canonical_resource_type(resource_type):
if "::" in resource_type and not resource_type.startswith("AWS::"):
resource_type = "AWS::%s" % resource_type
return resource_type
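# A brief usage sketch for canonical_resource_type; the sample type names below are
# illustrative assumptions, not values read from an actual template:
#   canonical_resource_type("SQS::Queue")      -> "AWS::SQS::Queue"
#   canonical_resource_type("AWS::SQS::Queue") -> "AWS::SQS::Queue"  (already canonical)
#   canonical_resource_type("Parameter")       -> "Parameter"        (no "::", left unchanged)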
def get_attr_from_model_instance(resource, attribute, resource_type, resource_id=None):
resource_type = canonical_resource_type(resource_type)
# TODO: remove moto.MODEL_MAP here
model_class = RESOURCE_MODELS.get(resource_type) or parsing.MODEL_MAP.get(resource_type)
if not model_class:
if resource_type not in ["AWS::Parameter", "Parameter"]:
LOG.debug('Unable to find model class for resource type "%s"' % resource_type)
return
try:
inst = model_class(resource_name=resource_id, resource_json=resource)
return inst.get_cfn_attribute(attribute)
except Exception:
pass
def resolve_ref(stack_name, ref, resources, attribute):
if ref == "AWS::Region":
return aws_stack.get_region()
if ref == "AWS::Partition":
return "aws"
if ref == "AWS::StackName":
return stack_name
if ref == "AWS::StackId":
# TODO return proper stack id!
return stack_name
if ref == "AWS::AccountId":
return TEST_AWS_ACCOUNT_ID
if ref == "AWS::NoValue":
return PLACEHOLDER_AWS_NO_VALUE
if ref == "AWS::NotificationARNs":
# TODO!
return {}
if ref == "AWS::URLSuffix":
return AWS_URL_SUFFIX
is_ref_attribute = attribute in ["Ref", "PhysicalResourceId", "Arn"]
if is_ref_attribute:
resolve_refs_recursively(stack_name, resources.get(ref, {}), resources)
return determine_resource_physical_id(
resource_id=ref,
resources=resources,
attribute=attribute,
stack_name=stack_name,
)
if resources.get(ref):
if isinstance(resources[ref].get(attribute), (str, int, float, bool, dict)):
return resources[ref][attribute]
# fetch resource details
resource_new = retrieve_resource_details(ref, {}, resources, stack_name)
if not resource_new:
raise DependencyNotYetSatisfied(
resource_ids=ref,
message='Unable to fetch details for resource "%s" (resolving attribute "%s")'
% (ref, attribute),
)
resource = resources.get(ref)
resource_type = get_resource_type(resource)
result = extract_resource_attribute(
resource_type,
resource_new,
attribute,
resource_id=ref,
resource=resource,
resources=resources,
stack_name=stack_name,
)
if result is None:
LOG.warning(
'Unable to extract reference attribute "%s" from resource: %s %s'
% (attribute, resource_new, resource)
)
return result
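# A minimal sketch of how resolve_ref behaves, assuming a hypothetical stack "stack1"
# with a logical resource ID "MyBucket" (both names are made up for illustration):
#   resolve_ref("stack1", "AWS::Region", resources, attribute="Ref")    -> current region, e.g. "us-east-1"
#   resolve_ref("stack1", "AWS::Partition", resources, attribute="Ref") -> "aws"
#   resolve_ref("stack1", "MyBucket", resources, attribute="Ref")
#       -> the resource's physical ID, as returned by determine_resource_physical_id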
# Using a @prevent_stack_overflow decorator here to avoid infinite recursion
# in case we load stack exports that have circular dependencies (see issue 3438)
# TODO: Potentially think about a better approach in the future
@prevent_stack_overflow(match_parameters=True)
def resolve_refs_recursively(stack_name, value, resources):
if isinstance(value, dict):
keys_list = list(value.keys())
stripped_fn_lower = keys_list[0].lower().split("::")[-1] if len(keys_list) == 1 else None
# process special operators
if keys_list == ["Ref"]:
ref = resolve_ref(stack_name, value["Ref"], resources, attribute="Ref")
if ref is None:
msg = 'Unable to resolve Ref for resource "%s" (yet)' % value["Ref"]
LOG.debug("%s - %s" % (msg, resources.get(value["Ref"]) or set(resources.keys())))
raise DependencyNotYetSatisfied(resource_ids=value["Ref"], message=msg)
ref = resolve_refs_recursively(stack_name, ref, resources)
return ref
if stripped_fn_lower == "getatt":
attr_ref = value[keys_list[0]]
attr_ref = attr_ref.split(".") if isinstance(attr_ref, str) else attr_ref
return resolve_ref(stack_name, attr_ref[0], resources, attribute=attr_ref[1])
if stripped_fn_lower == "join":
join_values = value[keys_list[0]][1]
join_values = [resolve_refs_recursively(stack_name, v, resources) for v in join_values]
none_values = [v for v in join_values if v is None]
if none_values:
raise Exception(
"Cannot resolve CF fn::Join %s due to null values: %s" % (value, join_values)
)
return value[keys_list[0]][0].join([str(v) for v in join_values])
if stripped_fn_lower == "sub":
item_to_sub = value[keys_list[0]]
attr_refs = dict([(r, {"Ref": r}) for r in STATIC_REFS])
if not isinstance(item_to_sub, list):
item_to_sub = [item_to_sub, {}]
result = item_to_sub[0]
item_to_sub[1].update(attr_refs)
for key, val in item_to_sub[1].items():
val = resolve_refs_recursively(stack_name, val, resources)
result = result.replace("${%s}" % key, val)
# resolve placeholders
result = resolve_placeholders_in_string(
result, stack_name=stack_name, resources=resources
)
return result
if stripped_fn_lower == "findinmap":
attr = resolve_refs_recursively(stack_name, value[keys_list[0]][1], resources)
result = resolve_ref(stack_name, value[keys_list[0]][0], resources, attribute=attr)
if not result:
raise Exception(
"Cannot resolve fn::FindInMap: %s %s"
% (value[keys_list[0]], list(resources.keys()))
)
key = value[keys_list[0]][2]
if not isinstance(key, str):
key = resolve_refs_recursively(stack_name, key, resources)
return result.get(key)
if stripped_fn_lower == "importvalue":
import_value_key = resolve_refs_recursively(stack_name, value[keys_list[0]], resources)
stack = find_stack(stack_name)
stack_export = stack.exports_map.get(import_value_key) or {}
if not stack_export.get("Value"):
LOG.info(
'Unable to find export "%s" in stack "%s", existing export names: %s'
% (import_value_key, stack_name, list(stack.exports_map.keys()))
)
return None
return stack_export["Value"]
if stripped_fn_lower == "if":
condition, option1, option2 = value[keys_list[0]]
condition = evaluate_condition(stack_name, condition, resources)
return resolve_refs_recursively(
stack_name, option1 if condition else option2, resources
)
if stripped_fn_lower == "not":
condition = value[keys_list[0]][0]
condition = resolve_refs_recursively(stack_name, condition, resources)
return not condition
if stripped_fn_lower == "equals":
operand1, operand2 = value[keys_list[0]]
operand1 = resolve_refs_recursively(stack_name, operand1, resources)
operand2 = resolve_refs_recursively(stack_name, operand2, resources)
return str(operand1) == str(operand2)
if stripped_fn_lower == "select":
index, values = value[keys_list[0]]
index = resolve_refs_recursively(stack_name, index, resources)
values = resolve_refs_recursively(stack_name, values, resources)
return values[index]
if stripped_fn_lower == "split":
delimiter, string = value[keys_list[0]]
delimiter = resolve_refs_recursively(stack_name, delimiter, resources)
string = resolve_refs_recursively(stack_name, string, resources)
return string.split(delimiter)
if stripped_fn_lower == "getazs":
region = (
resolve_refs_recursively(stack_name, value["Fn::GetAZs"], resources)
or aws_stack.get_region()
)
azs = []
for az in ("a", "b", "c", "d"):
azs.append("%s%s" % (region, az))
return azs
if stripped_fn_lower == "base64":
value_to_encode = value[keys_list[0]]
value_to_encode = resolve_refs_recursively(stack_name, value_to_encode, resources)
return to_str(base64.b64encode(to_bytes(value_to_encode)))
for key, val in dict(value).items():
value[key] = resolve_refs_recursively(stack_name, val, resources)
if isinstance(value, list):
for i in range(len(value)):
value[i] = resolve_refs_recursively(stack_name, value[i], resources)
return value
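# A rough sketch of the intrinsic functions resolved above ("MyTopic" is a hypothetical
# logical resource ID used only for illustration):
#   resolve_refs_recursively("stack1", {"Fn::Join": ["-", ["a", "b", "c"]]}, resources) -> "a-b-c"
#   resolve_refs_recursively("stack1", {"Fn::Split": [",", "a,b,c"]}, resources)        -> ["a", "b", "c"]
#   resolve_refs_recursively("stack1", {"Ref": "MyTopic"}, resources)
#       -> the topic's physical ID, or DependencyNotYetSatisfied if it is not deployed yet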
def resolve_placeholders_in_string(result, stack_name=None, resources=None):
def _replace(match):
parts = match.group(1).split(".")
if len(parts) >= 2:
resource_name, _, attr_name = match.group(1).partition(".")
resolved = resolve_ref(
stack_name, resource_name.strip(), resources, attribute=attr_name.strip()
)
if resolved is None:
raise DependencyNotYetSatisfied(
resource_ids=resource_name,
message="Unable to resolve attribute ref %s" % match.group(1),
)
return resolved
if len(parts) == 1 and parts[0] in resources:
resource_json = resources[parts[0]]
result = extract_resource_attribute(
resource_json.get("Type"),
{},
"Ref",
resources=resources,
resource_id=parts[0],
stack_name=stack_name,
)
if result is None:
raise DependencyNotYetSatisfied(
resource_ids=parts[0],
message="Unable to resolve attribute ref %s" % match.group(1),
)
return result
# TODO raise exception here?
return match.group(0)
regex = r"\$\{([^\}]+)\}"
result = re.sub(regex, _replace, result)
return result
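# A small sketch of the "${...}" placeholders handled by _replace above ("MyFunction"
# is a hypothetical logical ID, not part of any real template):
#   "${MyFunction.Arn}" -> resolve_ref(..., attribute="Arn") for resource "MyFunction"
#   "${MyFunction}"     -> the "Ref" attribute of "MyFunction", if it is a known resource
#   other placeholders  -> left untouched (the original "${...}" text is returned)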
def evaluate_condition(stack_name, condition, resources):
condition = resolve_refs_recursively(stack_name, condition, resources)
condition = resolve_ref(stack_name, condition, resources, attribute="Ref")
condition = resolve_refs_recursively(stack_name, condition, resources)
return condition
def evaluate_resource_condition(resource, stack_name, resources):
condition = resource.get("Condition")
if condition:
condition = evaluate_condition(stack_name, condition, resources)
if condition is False or condition in FALSE_STRINGS or is_none_or_empty_value(condition):
return False
return True
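# A short sketch of how conditions gate deployment ("IsProd" is a hypothetical condition name):
#   {"Type": "AWS::SNS::Topic", "Condition": "IsProd", ...}
#       -> skipped (False) if "IsProd" evaluates to False, a false-like string, or an empty value
#   resources without a "Condition" key -> always True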
def get_stack_parameter(stack_name, parameter):
try:
client = aws_stack.connect_to_service("cloudformation")
stack = client.describe_stacks(StackName=stack_name)["Stacks"]
except Exception:
return None
stack = stack and stack[0]
if not stack:
return None
result = [p["ParameterValue"] for p in stack["Parameters"] if p["ParameterKey"] == parameter]
return (result or [None])[0]
def update_resource(resource_id, resources, stack_name):
resource = resources[resource_id]
resource_type = get_resource_type(resource)
if resource_type not in UPDATEABLE_RESOURCES:
LOG.warning('Unable to update resource type "%s", id "%s"' % (resource_type, resource_id))
return
LOG.info("Updating resource %s of type %s" % (resource_id, resource_type))
canonical_type = canonical_resource_type(resource_type)
resource_class = RESOURCE_MODELS.get(canonical_type)
if resource_class:
instance = resource_class(resource)
return instance.update_resource(resource, stack_name=stack_name, resources=resources)
def fix_account_id_in_arns(params):
def fix_ids(o, **kwargs):
if isinstance(o, dict):
for k, v in o.items():
if common.is_string(v, exclude_binary=True):
o[k] = aws_stack.fix_account_id_in_arns(v)
elif common.is_string(o, exclude_binary=True):
o = aws_stack.fix_account_id_in_arns(o)
return o
result = common.recurse_object(params, fix_ids)
return result
def convert_data_types(func_details, params):
"""Convert data types in the "params" object, with the type defs
specified in the 'types' attribute of "func_details"."""
types = func_details.get("types") or {}
attr_names = types.keys() or []
def cast(_obj, _type):
if _type == bool:
return _obj in ["True", "true", True]
if _type == str:
if isinstance(_obj, bool):
return str(_obj).lower()
return str(_obj)
if _type == int:
return int(_obj)
return _obj
def fix_types(o, **kwargs):
if isinstance(o, dict):
for k, v in o.items():
if k in attr_names:
o[k] = cast(v, types[k])
return o
result = common.recurse_object(params, fix_types)
return result
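# An illustrative sketch of the casting rules above; the parameter names and the
# "types" mapping are made up for demonstration:
#   func_details = {"types": {"Enabled": bool, "Port": int, "Comment": str}}
#   convert_data_types(func_details, {"Enabled": "true", "Port": "8080", "Comment": False})
#       -> {"Enabled": True, "Port": 8080, "Comment": "false"}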
# TODO remove this method
def prepare_template_body(req_data):
return template_preparer.prepare_template_body(req_data)
def deploy_resource(resource_id, resources, stack_name):
return execute_resource_action(resource_id, resources, stack_name, ACTION_CREATE)
def delete_resource(resource_id, resources, stack_name):
res = resources[resource_id]
res_type = res.get("Type")
if res_type == "AWS::S3::Bucket":
s3_listener.remove_bucket_notification(res["PhysicalResourceId"])
if res_type == "AWS::IAM::Role":
role_name = res.get("PhysicalResourceId") or res.get("Properties", {}).get("RoleName")
try:
iam_client = aws_stack.connect_to_service("iam")
rs = iam_client.list_role_policies(RoleName=role_name)
for policy in rs["PolicyNames"]:
iam_client.delete_role_policy(RoleName=role_name, PolicyName=policy)
rs = iam_client.list_instance_profiles_for_role(RoleName=role_name)
for instance_profile in rs["InstanceProfiles"]:
ip_name = instance_profile["InstanceProfileName"]
iam_client.remove_role_from_instance_profile(
InstanceProfileName=ip_name, RoleName=role_name
)
# iam_client.delete_instance_profile(
# InstanceProfileName=ip_name
# )
except Exception as e:
if "NoSuchEntity" not in str(e):
raise
if res_type == "AWS::EC2::VPC":
state = res[KEY_RESOURCE_STATE]
physical_resource_id = res["PhysicalResourceId"] or state.get("VpcId")
res["PhysicalResourceId"] = physical_resource_id
if state.get("VpcId"):
ec2_client = aws_stack.connect_to_service("ec2")
resp = ec2_client.describe_route_tables(
Filters=[
{"Name": "vpc-id", "Values": [state.get("VpcId")]},
{"Name": "association.main", "Values": ["false"]},
]
)
for rt in resp["RouteTables"]:
ec2_client.delete_route_table(RouteTableId=rt["RouteTableId"])
if res_type == "AWS::EC2::Subnet":
state = res[KEY_RESOURCE_STATE]
physical_resource_id = res["PhysicalResourceId"] or state["SubnetId"]
res["PhysicalResourceId"] = physical_resource_id
if res_type == "AWS::EC2::RouteTable":
ec2_client = aws_stack.connect_to_service("ec2")
resp = ec2_client.describe_vpcs()
vpcs = [vpc["VpcId"] for vpc in resp["Vpcs"]]
vpc_id = res.get("Properties", {}).get("VpcId")
if vpc_id not in vpcs:
# VPC already deleted before
return
return execute_resource_action(resource_id, resources, stack_name, ACTION_DELETE)
def execute_resource_action_fallback(
action_name, resource_id, resources, stack_name, resource, resource_type
):
# using moto as fallback for now - TODO remove in the future!
msg = 'Action "%s" for resource type %s not yet implemented' % (
action_name,
resource_type,
)
long_type = canonical_resource_type(resource_type)
clazz = parsing.MODEL_MAP.get(long_type)
if not clazz:
LOG.warning(msg)
return
LOG.info("%s - using fallback mechanism" % msg)
if action_name == ACTION_CREATE:
resource_name = get_resource_name(resource) or resource_id
result = clazz.create_from_cloudformation_json(
resource_name, resource, aws_stack.get_region()
)
return result
def execute_resource_action(resource_id, resources, stack_name, action_name):
resource = resources[resource_id]
resource_type = get_resource_type(resource)
func_details = get_deployment_config(resource_type)
if not func_details or action_name not in func_details:
if resource_type in ["Parameter"]:
return
return execute_resource_action_fallback(
action_name, resource_id, resources, stack_name, resource, resource_type
)
LOG.debug(
'Running action "%s" for resource type "%s" id "%s"'
% (action_name, resource_type, resource_id)
)
func_details = func_details[action_name]
func_details = func_details if isinstance(func_details, list) else [func_details]
results = []
for func in func_details:
if callable(func["function"]):
result = func["function"](resource_id, resources, resource_type, func, stack_name)
results.append(result)
continue
client = get_client(resource, func)
if client:
result = configure_resource_via_sdk(
resource_id, resources, resource_type, func, stack_name, action_name
)
results.append(result)
return (results or [None])[0]
def fix_resource_props_for_sdk_deployment(resource_type, resource_props):
if resource_type == "Lambda::Function":
# Properties will be validated by botocore before sending request to AWS
# botocore/data/lambda/2015-03-31/service-2.json:1161 (EnvironmentVariableValue)
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-environment.html
if "Environment" in resource_props:
environment_variables = resource_props["Environment"].get("Variables", {})
resource_props["Environment"]["Variables"] = {
k: str(v) for k, v in environment_variables.items()
}
if resource_type == "SQS::Queue":
# https://github.com/localstack/localstack/issues/3004
if "ReceiveMessageWaitTimeSeconds" in resource_props:
resource_props["ReceiveMessageWaitTimeSeconds"] = int(
resource_props["ReceiveMessageWaitTimeSeconds"]
)
if resource_type == "KMS::Key":
resource_props["KeyPolicy"] = json.dumps(resource_props.get("KeyPolicy", {}))
resource_props["Enabled"] = resource_props.get("Enabled", True)
resource_props["EnableKeyRotation"] = resource_props.get("EnableKeyRotation", False)
resource_props["Description"] = resource_props.get("Description", "")
def configure_resource_via_sdk(
resource_id, resources, resource_type, func_details, stack_name, action_name
):
resource = resources[resource_id]
if resource_type == "EC2::Instance":
if action_name == "create":
func_details["boto_client"] = "resource"
client = get_client(resource, func_details)
function = getattr(client, func_details["function"])
params = func_details.get("parameters") or lambda_get_params()
defaults = func_details.get("defaults", {})
resource_props = resource["Properties"] = resource.get("Properties", {})
resource_props = dict(resource_props)
resource_state = resource.get(KEY_RESOURCE_STATE, {})
# Validate props for each resource type
fix_resource_props_for_sdk_deployment(resource_type, resource_props)
if callable(params):
params = params(
resource_props,
stack_name=stack_name,
resources=resources,
resource_id=resource_id,
)
else:
# it could be a list like ['param1', 'param2', {'apiCallParamName': 'cfResourcePropName'}]
if isinstance(params, list):
_params = {}
for param in params:
if isinstance(param, dict):
_params.update(param)
else:
_params[param] = param
params = _params
params = dict(params)
for param_key, prop_keys in dict(params).items():
params.pop(param_key, None)
if not isinstance(prop_keys, list):
prop_keys = [prop_keys]
for prop_key in prop_keys:
if prop_key == PLACEHOLDER_RESOURCE_NAME:
params[param_key] = PLACEHOLDER_RESOURCE_NAME
else:
if callable(prop_key):
prop_value = prop_key(
resource_props,
stack_name=stack_name,
resources=resources,
resource_id=resource_id,
)
else:
prop_value = resource_props.get(
prop_key,
resource.get(prop_key, resource_state.get(prop_key)),
)
if prop_value is not None:
params[param_key] = prop_value
break
# replace PLACEHOLDER_RESOURCE_NAME in params
resource_name_holder = {}
def fix_placeholders(o, **kwargs):
if isinstance(o, dict):
for k, v in o.items():
if v == PLACEHOLDER_RESOURCE_NAME:
if "value" not in resource_name_holder:
resource_name_holder["value"] = get_resource_name(resource) or resource_id
o[k] = resource_name_holder["value"]
return o
common.recurse_object(params, fix_placeholders)
# assign default values if empty
params = common.merge_recursive(defaults, params)
# this is an indicator that we should skip this resource deployment, and return
if params is None:
return
# convert refs
for param_key, param_value in dict(params).items():
if param_value is not None:
param_value = params[param_key] = resolve_refs_recursively(
stack_name, param_value, resources
)
# convert any moto account IDs (123456789012) in ARNs to our format (000000000000)
params = fix_account_id_in_arns(params)
# convert data types (e.g., boolean strings to bool)
params = convert_data_types(func_details, params)
# remove None values, as they usually raise boto3 errors
params = remove_none_values(params)
# run pre-actions
run_pre_create_actions(action_name, resource_id, resources, resource_type, stack_name, params)
# convert boolean strings
# (TODO: we should find a more reliable mechanism than this opportunistic/probabilistic approach!)
params_before_conversion = copy.deepcopy(params)
for param_key, param_value in dict(params).items():
# Convert to boolean (TODO: do this recursively?)
if str(param_value).lower() in ["true", "false"]:
params[param_key] = str(param_value).lower() == "true"
# invoke function
try:
LOG.debug(
'Request for resource type "%s" in region %s: %s %s'
% (resource_type, aws_stack.get_region(), func_details["function"], params)
)
try:
result = function(**params)
except botocore.exceptions.ParamValidationError as e:
LOG.debug(f"Trying original parameters: {params_before_conversion}")
if "type: <class 'bool'>" not in str(e):
raise
result = function(**params_before_conversion)
except Exception as e:
if action_name == "delete" and check_not_found_exception(e, resource_type, resource):
return
LOG.warning(
"Error calling %s with params: %s for resource: %s" % (function, params, resource)
)
raise e
# run post-actions
run_post_create_actions(action_name, resource_id, resources, resource_type, stack_name, result)
return result
# TODO: move as individual functions to RESOURCE_TO_FUNCTION
def run_pre_create_actions(
action_name, resource_id, resources, resource_type, stack_name, resource_params
):
resource = resources[resource_id]
resource_props = resource["Properties"] = resource.get("Properties", {})
if resource_type == "IAM::Role" and action_name == ACTION_DELETE:
iam = aws_stack.connect_to_service("iam")
role_name = resource_props["RoleName"]
for policy in iam.list_attached_role_policies(RoleName=role_name).get(
"AttachedPolicies", []
):
iam.detach_role_policy(RoleName=role_name, PolicyArn=policy["PolicyArn"])
if resource_type == "S3::Bucket" and action_name == ACTION_DELETE:
s3 = aws_stack.connect_to_service("s3")
bucket_name = resource_props.get("BucketName")
try:
s3.delete_bucket_policy(Bucket=bucket_name)
except Exception:
pass
# TODO: verify whether AWS CF automatically deletes all bucket objects, or fails if bucket is non-empty
try:
delete_all_s3_objects(bucket_name)
except Exception as e:
if "NoSuchBucket" not in str(e):
raise
# hack: make sure the bucket actually exists, to prevent delete_bucket operation later on from failing
s3.create_bucket(Bucket=bucket_name)
# TODO: move as individual functions to RESOURCE_TO_FUNCTION
def run_post_create_actions(action_name, resource_id, resources, resource_type, stack_name, result):
if action_name == ACTION_DELETE:
return result
resource = resources[resource_id]
resource_props = resource["Properties"] = resource.get("Properties", {})
# some resources have attached/nested resources which we need to create recursively now
if resource_type == "ApiGateway::Method":
integration = resource_props.get("Integration")
apigateway = aws_stack.connect_to_service("apigateway")
if integration:
api_id = resolve_refs_recursively(stack_name, resource_props["RestApiId"], resources)
res_id = resolve_refs_recursively(stack_name, resource_props["ResourceId"], resources)
kwargs = {}
if integration.get("Uri"):
uri = resolve_refs_recursively(stack_name, integration.get("Uri"), resources)
# Moto has a validate method on Uri for integration_type "HTTP" | "HTTP_PROXY" that does not accept
# a Uri value without a path, so we need to add a path ("/") if one does not exist
if integration.get("Type") in ["HTTP", "HTTP_PROXY"]:
rs = urlparse(uri)
if not rs.path:
uri = "{}/".format(uri)
kwargs["uri"] = uri
if integration.get("IntegrationHttpMethod"):
kwargs["integrationHttpMethod"] = integration["IntegrationHttpMethod"]
if integration.get("RequestTemplates"):
kwargs["requestTemplates"] = integration["RequestTemplates"]
if integration.get("Credentials"):
kwargs["credentials"] = integration["Credentials"]
apigateway.put_integration(
restApiId=api_id,
resourceId=res_id,
httpMethod=resource_props["HttpMethod"],
type=integration["Type"],
**kwargs,
)
responses = resource_props.get("MethodResponses") or []
for response in responses:
api_id = resolve_refs_recursively(stack_name, resource_props["RestApiId"], resources)
res_id = resolve_refs_recursively(stack_name, resource_props["ResourceId"], resources)
apigateway.put_method_response(
restApiId=api_id,
resourceId=res_id,
httpMethod=resource_props["HttpMethod"],
statusCode=str(response["StatusCode"]),
responseParameters=response.get("ResponseParameters", {}),
)
elif resource_type == "ApiGateway::RestApi":
body = resource_props.get("Body")
if body:
client = aws_stack.connect_to_service("apigateway")
body = json.dumps(body) if isinstance(body, dict) else body
client.put_rest_api(restApiId=result["id"], body=to_bytes(body))
elif resource_type == "SNS::Topic":
subscriptions = resource_props.get("Subscription", [])
for subscription in subscriptions:
if is_none_or_empty_value(subscription):
continue
endpoint = resolve_refs_recursively(stack_name, subscription["Endpoint"], resources)
topic_arn = retrieve_topic_arn(resource_props["TopicName"])
aws_stack.connect_to_service("sns").subscribe(
TopicArn=topic_arn, Protocol=subscription["Protocol"], Endpoint=endpoint
)
elif resource_type == "S3::Bucket":
tags = resource_props.get("Tags")
if tags:
aws_stack.connect_to_service("s3").put_bucket_tagging(
Bucket=resource_props["BucketName"], Tagging={"TagSet": tags}
)
elif resource_type == "IAM::Role":
policies = resource_props.get("Policies", [])
for policy in policies:
policy = policy[0] if isinstance(policy, list) and len(policy) == 1 else policy
iam = aws_stack.connect_to_service("iam")
if policy == PLACEHOLDER_AWS_NO_VALUE:
continue
if not isinstance(policy, dict):
LOG.info(
'Invalid format of policy for IAM role "%s": %s'
% (resource_props.get("RoleName"), policy)
)
continue
pol_name = policy.get("PolicyName")
doc = dict(policy["PolicyDocument"])
doc["Version"] = doc.get("Version") or IAM_POLICY_VERSION
statements = (
doc["Statement"] if isinstance(doc["Statement"], list) else [doc["Statement"]]
)
for statement in statements:
if isinstance(statement.get("Resource"), list):
# filter out empty resource strings
statement["Resource"] = [r for r in statement["Resource"] if r]
doc = json.dumps(doc)
LOG.debug(
"Running put_role_policy(...) for IAM::Role policy: %s %s %s"
% (resource_props["RoleName"], pol_name, doc)
)
iam.put_role_policy(
RoleName=resource_props["RoleName"],
PolicyName=pol_name,
PolicyDocument=doc,
)
elif resource_type == "IAM::Policy":
# associate policies with users, groups, roles
groups = resource_props.get("Groups", [])
roles = resource_props.get("Roles", [])
users = resource_props.get("Users", [])
policy_arn = aws_stack.policy_arn(resource_props.get("PolicyName"))
iam = aws_stack.connect_to_service("iam")
for group in groups:
iam.attach_group_policy(GroupName=group, PolicyArn=policy_arn)
for role in roles:
iam.attach_role_policy(RoleName=role, PolicyArn=policy_arn)
for user in users:
iam.attach_user_policy(UserName=user, PolicyArn=policy_arn)
elif resource_type == "IAM::InstanceProfile":
if resource_props.get("Roles", []):
iam = aws_stack.connect_to_service("iam")
iam.add_role_to_instance_profile(
InstanceProfileName=resource_props["InstanceProfileName"],
RoleName=resource_props["Roles"][0],
)
elif resource_type == "IAM::User":
iam = aws_stack.connect_to_service("iam")
username = resource_props.get("UserName")
for group in resource_props.get("Groups", []):
iam.add_user_to_group(UserName=username, GroupName=group)
for managed_policy in resource_props.get("ManagedPolicyArns", []):
iam.attach_user_policy(UserName=username, PolicyArn=managed_policy)
for policy in resource_props.get("Policies", []):
policy_doc = json.dumps(policy.get("PolicyDocument"))
iam.put_user_policy(
UserName=username, PolicyName=policy.get("PolicyName"), PolicyDocument=policy_doc
)
login_profile = resource_props.get("LoginProfile")
if login_profile:
iam.create_login_profile(
UserName=username,
Password=login_profile.get("Password"),
PasswordResetRequired=login_profile.get("PasswordResetRequired"),
)
def get_action_name_for_resource_change(res_change):
return {"Add": "CREATE", "Remove": "DELETE", "Modify": "UPDATE"}.get(res_change)
def is_none_or_empty_value(value):
return not value or value == PLACEHOLDER_AWS_NO_VALUE
# TODO: this shouldn't be called for stack parameters
def determine_resource_physical_id(
resource_id, resources=None, stack=None, attribute=None, stack_name=None
):
resources = resources or stack.resources
stack_name = stack_name or stack.stack_name
resource = resources.get(resource_id, {})
if not resource:
return
resource_type = resource.get("Type") or ""
resource_type = re.sub("^AWS::", "", resource_type)
resource_props = resource.get("Properties", {})
# determine result from resource class
canonical_type = canonical_resource_type(resource_type)
resource_class = RESOURCE_MODELS.get(canonical_type)
if resource_class:
resource_inst = resource_class(resource)
resource_inst.fetch_state_if_missing(stack_name=stack_name, resources=resources)
result = resource_inst.get_physical_resource_id(attribute=attribute)
if result:
return result
# TODO: put logic into resource-specific model classes
if resource_type == "ApiGateway::RestApi":
result = resource_props.get("id")
if result:
return result
elif resource_type == "ApiGateway::Stage":
return resource_props.get("StageName")
elif resource_type == "AppSync::DataSource":
return resource_props.get("DataSourceArn")
elif resource_type == "KinesisFirehose::DeliveryStream":
return aws_stack.firehose_stream_arn(resource_props.get("DeliveryStreamName"))
elif resource_type == "StepFunctions::StateMachine":
return aws_stack.state_machine_arn(
resource_props.get("StateMachineName")
) # returns ARN in AWS
elif resource_type == "S3::Bucket":
if attribute == "Arn":
return aws_stack.s3_bucket_arn(resource_props.get("BucketName"))
return resource_props.get("BucketName") # Note: "Ref" returns bucket name in AWS
elif resource_type == "IAM::Role":
if attribute == "Arn":
return aws_stack.role_arn(resource_props.get("RoleName"))
return resource_props.get("RoleName")
elif resource_type == "SecretsManager::Secret":
arn = get_secret_arn(resource_props.get("Name")) or ""
if attribute == "Arn":
return arn
return arn.split(":")[-1]
elif resource_type == "IAM::Policy":
if attribute == "Arn":
return aws_stack.policy_arn(resource_props.get("PolicyName"))
return resource_props.get("PolicyName")
elif resource_type == "DynamoDB::Table":
table_name = resource_props.get("TableName")
if table_name:
if attribute == "Ref":
return table_name # Note: "Ref" returns table name in AWS
return table_name
elif resource_type == "Logs::LogGroup":
return resource_props.get("LogGroupName")
res_id = resource.get("PhysicalResourceId")
if res_id and attribute in [None, "Ref", "PhysicalResourceId"]:
return res_id
result = extract_resource_attribute(
resource_type,
{},
attribute or "PhysicalResourceId",
stack_name=stack_name,
resource_id=resource_id,
resource=resource,
resources=resources,
)
if result is not None:
# note that value could be an empty string here (in case of Parameter values)
return result
LOG.info(
'Unable to determine PhysicalResourceId for "%s" resource, ID "%s"'
% (resource_type, resource_id)
)
def update_resource_details(stack, resource_id, details, action=None):
resource = stack.resources.get(resource_id, {})
if not resource or not details:
return
# TODO: we need to rethink this method - this should be encapsulated in the resource model classes.
# Also, instead of actively updating the PhysicalResourceId attributes below, they should be
# determined and returned by the resource model classes upon request.
resource_type = resource.get("Type") or ""
resource_type = re.sub("^AWS::", "", resource_type)
resource_props = resource.get("Properties", {})
if resource_type == "ApiGateway::RestApi":
resource_props["id"] = details["id"]
if resource_type == "KMS::Key":
resource["PhysicalResourceId"] = details["KeyMetadata"]["KeyId"]
if resource_type == "EC2::Instance":
if action == "CREATE":
resource["PhysicalResourceId"] = details[0].id
if resource_type == "EC2::SecurityGroup":
resource["PhysicalResourceId"] = details["GroupId"]
if resource_type == "IAM::InstanceProfile":
resource["PhysicalResourceId"] = details["InstanceProfile"]["InstanceProfileName"]
if resource_type == "StepFunctions::Activity":
resource["PhysicalResourceId"] = details["activityArn"]
if resource_type == "ApiGateway::Model":
resource["PhysicalResourceId"] = details["id"]
if resource_type == "EC2::VPC":
resource["PhysicalResourceId"] = details["Vpc"]["VpcId"]
if resource_type == "EC2::Subnet":
resource["PhysicalResourceId"] = details["Subnet"]["SubnetId"]
if resource_type == "EC2::RouteTable":
resource["PhysicalResourceId"] = details["RouteTable"]["RouteTableId"]
if resource_type == "EC2::Route":
resource["PhysicalResourceId"] = generate_route_id(
resource_props["RouteTableId"],
resource_props.get("DestinationCidrBlock", ""),
resource_props.get("DestinationIpv6CidrBlock"),
)
if isinstance(details, MotoCloudFormationModel):
# fallback: keep track of moto resource status
stack.moto_resource_statuses[resource_id] = details
def add_default_resource_props(
resource,
stack_name,
resource_name=None,
resource_id=None,
update=False,
existing_resources=None,
):
"""Apply some fixes to resource props which otherwise cause deployments to fail"""
res_type = resource["Type"]
props = resource["Properties"] = resource.get("Properties", {})
existing_resources = existing_resources or {}
def _generate_res_name():
return "%s-%s-%s" % (stack_name, resource_name or resource_id, short_uid())
# TODO: move logic below into resource classes!
if res_type == "AWS::Lambda::EventSourceMapping" and not props.get("StartingPosition"):
props["StartingPosition"] = "LATEST"
elif res_type == "AWS::Logs::LogGroup" and not props.get("LogGroupName") and resource_name:
props["LogGroupName"] = resource_name
elif res_type == "AWS::Lambda::Function" and not props.get("FunctionName"):
props["FunctionName"] = "{}-lambda-{}".format(stack_name[:45], short_uid())
elif res_type == "AWS::SNS::Topic" and not props.get("TopicName"):
props["TopicName"] = "topic-%s" % short_uid()
elif res_type == "AWS::SQS::Queue" and not props.get("QueueName"):
props["QueueName"] = "queue-%s" % short_uid()
elif res_type == "AWS::SQS::QueuePolicy" and not resource.get("PhysicalResourceId"):
resource["PhysicalResourceId"] = _generate_res_name()
elif res_type == "AWS::IAM::ManagedPolicy" and not resource.get("ManagedPolicyName"):
resource["ManagedPolicyName"] = _generate_res_name()
elif res_type == "AWS::ApiGateway::RestApi" and not props.get("Name"):
props["Name"] = _generate_res_name()
elif res_type == "AWS::ApiGateway::Stage" and not props.get("StageName"):
props["StageName"] = "default"
elif res_type == "AWS::ApiGateway::ApiKey" and not props.get("Name"):
props["Name"] = _generate_res_name()
elif res_type == "AWS::ApiGateway::UsagePlan" and not props.get("UsagePlanName"):
props["UsagePlanName"] = _generate_res_name()
elif res_type == "AWS::ApiGateway::Model" and not props.get("Name"):
props["Name"] = _generate_res_name()
elif res_type == "AWS::ApiGateway::RequestValidator" and not props.get("Name"):
props["Name"] = _generate_res_name()
elif res_type == "AWS::DynamoDB::Table":
update_dynamodb_index_resource(resource)
props["TableName"] = props.get("TableName") or _generate_res_name()
elif res_type == "AWS::CloudWatch::Alarm":
props["AlarmName"] = props.get("AlarmName") or _generate_res_name()
elif res_type == "AWS::SecretsManager::Secret":
props["Name"] = props.get("Name") or _generate_res_name()
elif res_type == "AWS::S3::Bucket" and not props.get("BucketName"):
existing_bucket = existing_resources.get(resource_id) or {}
bucket_name = (
existing_bucket.get("Properties", {}).get("BucketName") or _generate_res_name()
)
props["BucketName"] = s3_listener.normalize_bucket_name(bucket_name)
elif res_type == "AWS::StepFunctions::StateMachine" and not props.get("StateMachineName"):
props["StateMachineName"] = _generate_res_name()
elif res_type == "AWS::CloudFormation::Stack" and not props.get("StackName"):
props["StackName"] = _generate_res_name()
elif res_type == "AWS::EC2::SecurityGroup":
props["GroupName"] = props.get("GroupName") or _generate_res_name()
elif res_type == "AWS::Redshift::Cluster":
props["ClusterIdentifier"] = props.get("ClusterIdentifier") or _generate_res_name()
elif res_type == "AWS::IAM::InstanceProfile":
props["InstanceProfileName"] = props.get("InstanceProfileName") or _generate_res_name()
elif res_type == "AWS::Logs::LogGroup":
props["LogGroupName"] = props.get("LogGroupName") or _generate_res_name()
elif res_type == "AWS::KMS::Key":
tags = props["Tags"] = props.get("Tags", [])
existing = [t for t in tags if t["Key"] == "localstack-key-id"]
if not existing:
# append tags, to allow us to determine in service_models.py whether this key is already deployed
tags.append({"Key": "localstack-key-id", "Value": short_uid()})
# generate default names for certain resource types
default_attrs = (("AWS::IAM::Role", "RoleName"), ("AWS::Events::Rule", "Name"))
for entry in default_attrs:
if res_type == entry[0] and not props.get(entry[1]):
if not resource_id:
resource_id = canonical_json(json_safe(props))
resource_id = md5(resource_id)
props[entry[1]] = "cf-%s-%s" % (stack_name, resource_id)
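# A rough sketch of the generated defaults (stack and resource names are hypothetical,
# and the random suffixes come from short_uid(), so actual values will differ):
#   AWS::Lambda::Function without FunctionName, stack "mystack"
#       -> props["FunctionName"] ~ "mystack-lambda-1a2b3c" (stack name truncated to 45 chars)
#   AWS::SQS::Queue without QueueName -> props["QueueName"] ~ "queue-4d5e6f"
#   AWS::IAM::Role without RoleName   -> props["RoleName"] ~ "cf-mystack-<logical id>"
#       (an MD5 of the canonical props is used when no logical resource ID is available)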
def update_dynamodb_index_resource(resource):
if resource.get("Properties").get("BillingMode") == "PAY_PER_REQUEST":
for glob_index in resource.get("Properties", {}).get("GlobalSecondaryIndexes", []):
if not glob_index.get("ProvisionedThroughput"):
glob_index["ProvisionedThroughput"] = {
"ReadCapacityUnits": 99,
"WriteCapacityUnits": 99,
}
# -----------------------
# MAIN TEMPLATE DEPLOYER
# -----------------------
class TemplateDeployer(object):
def __init__(self, stack):
self.stack = stack
@property
def resources(self):
return self.stack.resources
@property
def stack_name(self):
return self.stack.stack_name
# ------------------
# MAIN ENTRY POINTS
# ------------------
def deploy_stack(self):
self.stack.set_stack_status("CREATE_IN_PROGRESS")
try:
self.apply_changes(
self.stack,
self.stack,
stack_name=self.stack.stack_name,
initialize=True,
action="CREATE",
)
except Exception as e:
LOG.info("Unable to create stack %s: %s" % (self.stack.stack_name, e))
self.stack.set_stack_status("CREATE_FAILED")
raise
def apply_change_set(self, change_set):
action = "CREATE"
change_set.stack.set_stack_status("%s_IN_PROGRESS" % action)
try:
self.apply_changes(
change_set.stack,
change_set,
stack_name=change_set.stack_name,
action=action,
)
except Exception as e:
LOG.info(
"Unable to apply change set %s: %s" % (change_set.metadata.get("ChangeSetName"), e)
)
change_set.metadata["Status"] = "%s_FAILED" % action
self.stack.set_stack_status("%s_FAILED" % action)
raise
def update_stack(self, new_stack):
self.stack.set_stack_status("UPDATE_IN_PROGRESS")
# apply changes
self.apply_changes(self.stack, new_stack, stack_name=self.stack.stack_name, action="UPDATE")
def delete_stack(self):
if not self.stack:
return
self.stack.set_stack_status("DELETE_IN_PROGRESS")
stack_resources = list(self.stack.resources.values())
stack_name = self.stack.stack_name
resources = dict([(r["LogicalResourceId"], common.clone_safe(r)) for r in stack_resources])
for key, resource in resources.items():
resource["Properties"] = resource.get("Properties", common.clone_safe(resource))
resource["ResourceType"] = resource.get("ResourceType") or resource.get("Type")
for resource_id, resource in resources.items():
# TODO: cache condition value in resource details on deployment and use cached value here
if evaluate_resource_condition(resource, stack_name, resources):
delete_resource(resource_id, resources, stack_name)
self.stack.set_resource_status(resource_id, "DELETE_COMPLETE")
# update status
self.stack.set_stack_status("DELETE_COMPLETE")
# ----------------------------
# DEPENDENCY RESOLUTION UTILS
# ----------------------------
def is_deployable_resource(self, resource):
resource_type = get_resource_type(resource)
entry = get_deployment_config(resource_type)
if entry is None and resource_type not in ["Parameter", None]:
# fall back to moto resource creation (TODO: remove in the future)
long_res_type = canonical_resource_type(resource_type)
if long_res_type in parsing.MODEL_MAP:
return True
LOG.warning('Unable to deploy resource type "%s": %s' % (resource_type, resource))
return bool(entry and entry.get(ACTION_CREATE))
def is_deployed(self, resource):
resource_status = {}
resource_id = resource["LogicalResourceId"]
details = retrieve_resource_details(
resource_id, resource_status, self.resources, self.stack_name
)
return bool(details)
def is_updateable(self, resource):
"""Return whether the given resource can be updated or not."""
if not self.is_deployable_resource(resource) or not self.is_deployed(resource):
return False
resource_type = get_resource_type(resource)
return resource_type in UPDATEABLE_RESOURCES
def all_resource_dependencies_satisfied(self, resource):
unsatisfied = self.get_unsatisfied_dependencies(resource)
return not unsatisfied
def get_unsatisfied_dependencies(self, resource):
res_deps = self.get_resource_dependencies(resource)
return self.get_unsatisfied_dependencies_for_resources(res_deps, resource)
def get_unsatisfied_dependencies_for_resources(
self, resources, depending_resource=None, return_first=True
):
result = {}
for resource_id, resource in iteritems(resources):
if self.is_deployable_resource(resource):
if not self.is_deployed(resource):
LOG.debug(
"Dependency for resource %s not yet deployed: %s %s"
% (depending_resource, resource_id, resource)
)
result[resource_id] = resource
if return_first:
break
return result
def get_resource_dependencies(self, resource):
result = {}
# Note: using the original, unmodified template here to preserve Ref's ...
raw_resources = self.stack.template_original["Resources"]
raw_resource = raw_resources[resource["LogicalResourceId"]]
dumped = json.dumps(common.json_safe(raw_resource))
for other_id, other in raw_resources.items():
if resource != other:
# TODO: traverse dict instead of doing string search!
search1 = '{"Ref": "%s"}' % other_id
search2 = '{"Fn::GetAtt": ["%s", ' % other_id
if search1 in dumped or search2 in dumped:
result[other_id] = other
if other_id in resource.get("DependsOn", []):
result[other_id] = other
return result
# -----------------
# DEPLOYMENT UTILS
# -----------------
def add_default_resource_props(self, resources=None):
resources = resources or self.resources
for resource_id, resource in resources.items():
add_default_resource_props(resource, self.stack_name, resource_id=resource_id)
def init_resource_status(self, resources=None, stack=None, action="CREATE"):
resources = resources or self.resources
stack = stack or self.stack
for resource_id, resource in resources.items():
stack.set_resource_status(resource_id, "%s_IN_PROGRESS" % action)
def update_resource_details(self, resource_id, result, stack=None, action="CREATE"):
stack = stack or self.stack
# update resource state
update_resource_details(stack, resource_id, result, action)
# update physical resource id
resource = stack.resources[resource_id]
physical_id = resource.get("PhysicalResourceId")
physical_id = physical_id or determine_resource_physical_id(resource_id, stack=stack)
if not resource.get("PhysicalResourceId") or action == "UPDATE":
resource["PhysicalResourceId"] = physical_id
# set resource status
stack.set_resource_status(resource_id, "%s_COMPLETE" % action, physical_res_id=physical_id)
return physical_id
def get_change_config(self, action, resource, change_set_id=None):
return {
"Type": "Resource",
"ResourceChange": {
"Action": action,
"LogicalResourceId": resource.get("LogicalResourceId"),
"PhysicalResourceId": resource.get("PhysicalResourceId"),
"ResourceType": resource.get("Type"),
"Replacement": "False",
"ChangeSetId": change_set_id,
},
}
def resource_config_differs(self, resource_new):
"""Return whether the given resource properties differ from the existing config (for stack updates)."""
resource_id = resource_new["LogicalResourceId"]
resource_old = self.resources[resource_id]
props_old = resource_old["Properties"]
props_new = resource_new["Properties"]
ignored_keys = ["LogicalResourceId", "PhysicalResourceId"]
old_keys = set(props_old.keys()) - set(ignored_keys)
new_keys = set(props_new.keys()) - set(ignored_keys)
if old_keys != new_keys:
return True
for key in old_keys:
if props_old[key] != props_new[key]:
return True
old_status = self.stack.resource_states.get(resource_id) or {}
previous_state = (
old_status.get("PreviousResourceStatus") or old_status.get("ResourceStatus") or ""
)
if old_status and "DELETE" in previous_state:
return True
def merge_properties(self, resource_id, old_stack, new_stack):
old_resources = old_stack.template["Resources"]
new_resources = new_stack.template["Resources"]
new_resource = new_resources[resource_id]
old_resource = old_resources[resource_id] = old_resources.get(resource_id) or {}
for key, value in new_resource.items():
if key == "Properties":
continue
old_resource[key] = old_resource.get(key, value)
old_res_props = old_resource["Properties"] = old_resource.get("Properties", {})
for key, value in new_resource["Properties"].items():
old_res_props[key] = value
# overwrite original template entirely
old_stack.template_original["Resources"][resource_id] = new_stack.template_original[
"Resources"
][resource_id]
def resolve_param(
self, logical_id: str, param_type: str, default_value: Optional[str] = None
) -> Optional[str]:
if param_type == "AWS::SSM::Parameter::Value<String>":
ssm_client = aws_stack.connect_to_service("ssm")
param = ssm_client.get_parameter(Name=default_value)
return param["Parameter"]["Value"]
return None
def apply_parameter_changes(self, old_stack, new_stack) -> None:
parameters = {
p["ParameterKey"]: p
for p in old_stack.metadata["Parameters"] # go through current parameter values
}
for logical_id, value in new_stack.template["Parameters"].items():
default = value.get("Default")
provided_param_value = parameters.get(logical_id)
param = {
"ParameterKey": logical_id,
"ParameterValue": provided_param_value if default is None else default,
}
if default is not None:
resolved_value = self.resolve_param(logical_id, value.get("Type"), default)
if resolved_value is not None:
param["ResolvedValue"] = resolved_value
parameters[logical_id] = param
parameters.update({p["ParameterKey"]: p for p in new_stack.metadata["Parameters"]})
for change_set in new_stack.change_sets:
parameters.update({p["ParameterKey"]: p for p in change_set.metadata["Parameters"]})
# TODO: unclear/undocumented behavior in implicitly updating old_stack parameter here
old_stack.metadata["Parameters"] = [v for v in parameters.values() if v]
# TODO: fix circular import with cloudformation_api.py when importing Stack here
def construct_changes(
self,
existing_stack,
new_stack,
initialize=False,
change_set_id=None,
append_to_changeset=False,
):
from localstack.services.cloudformation.cloudformation_api import StackChangeSet
old_resources = existing_stack.template["Resources"]
new_resources = new_stack.template["Resources"]
deletes = [val for key, val in old_resources.items() if key not in new_resources]
adds = [val for key, val in new_resources.items() if initialize or key not in old_resources]
modifies = [val for key, val in new_resources.items() if key in old_resources]
changes = []
for action, items in (("Remove", deletes), ("Add", adds), ("Modify", modifies)):
for item in items:
item["Properties"] = item.get("Properties", {})
change = self.get_change_config(action, item, change_set_id=change_set_id)
changes.append(change)
# append changes to change set
if append_to_changeset and isinstance(new_stack, StackChangeSet):
new_stack.changes.extend(changes)
return changes
def apply_changes(
self,
existing_stack,
new_stack,
stack_name,
change_set_id=None,
initialize=False,
action=None,
):
old_resources = existing_stack.template["Resources"]
new_resources = new_stack.template["Resources"]
action = action or "CREATE"
self.init_resource_status(old_resources, action="UPDATE")
# apply parameter changes to existing stack
self.apply_parameter_changes(existing_stack, new_stack)
# construct changes
changes = self.construct_changes(
existing_stack,
new_stack,
initialize=initialize,
change_set_id=change_set_id,
)
# check if we have actual changes in the stack, and prepare properties
contains_changes = False
for change in changes:
res_action = change["ResourceChange"]["Action"]
resource = new_resources.get(change["ResourceChange"]["LogicalResourceId"])
if res_action != "Modify" or self.resource_config_differs(resource):
contains_changes = True
if res_action in ["Modify", "Add"]:
self.merge_properties(resource["LogicalResourceId"], existing_stack, new_stack)
if not contains_changes:
raise NoStackUpdates("No updates are to be performed.")
# merge stack outputs
existing_stack.template["Outputs"].update(new_stack.template.get("Outputs", {}))
# start deployment loop
return self.apply_changes_in_loop(
changes, existing_stack, stack_name, action=action, new_stack=new_stack
)
def apply_changes_in_loop(self, changes, stack, stack_name, action=None, new_stack=None):
from localstack.services.cloudformation.cloudformation_api import StackChangeSet
def _run(*args):
try:
self.do_apply_changes_in_loop(changes, stack, stack_name)
status = "%s_COMPLETE" % action
except Exception as e:
LOG.debug(
'Error applying changes for CloudFormation stack "%s": %s %s'
% (stack.stack_name, e, traceback.format_exc())
)
status = "%s_FAILED" % action
stack.set_stack_status(status)
if isinstance(new_stack, StackChangeSet):
new_stack.metadata["Status"] = status
new_stack.metadata["ExecutionStatus"] = (
"EXECUTE_FAILED" if "FAILED" in status else "EXECUTE_COMPLETE"
)
new_stack.metadata["StatusReason"] = "Deployment %s" % (
"failed" if "FAILED" in status else "succeeded"
)
# run deployment in background loop, to avoid client network timeouts
return start_worker_thread(_run)
def do_apply_changes_in_loop(self, changes, stack, stack_name):
# apply changes in a retry loop, to resolve resource dependencies and converge to the target state
changes_done = []
max_iters = 30
new_resources = stack.resources
# apply default props before running the loop
for resource_id, resource in new_resources.items():
add_default_resource_props(
resource,
stack.stack_name,
resource_id=resource_id,
existing_resources=new_resources,
)
# start deployment loop
for i in range(max_iters):
j = 0
updated = False
while j < len(changes):
change = changes[j]
res_change = change["ResourceChange"]
action = res_change["Action"]
is_add_or_modify = action in ["Add", "Modify"]
resource_id = res_change["LogicalResourceId"]
try:
if is_add_or_modify:
resource = new_resources[resource_id]
should_deploy = self.prepare_should_deploy_change(
resource_id, change, stack, new_resources
)
LOG.debug(
'Handling "%s" for resource "%s" (%s/%s) type "%s" in loop iteration %s'
% (
action,
resource_id,
j + 1,
len(changes),
res_change["ResourceType"],
i + 1,
)
)
if not should_deploy:
del changes[j]
stack_action = get_action_name_for_resource_change(action)
stack.set_resource_status(resource_id, "%s_COMPLETE" % stack_action)
continue
if not self.all_resource_dependencies_satisfied(resource):
j += 1
continue
self.apply_change(change, stack, new_resources, stack_name=stack_name)
changes_done.append(change)
del changes[j]
updated = True
except DependencyNotYetSatisfied as e:
LOG.debug(
'Dependencies for "%s" not yet satisfied, retrying in next loop: %s'
% (resource_id, e)
)
j += 1
if not changes:
break
if not updated:
raise Exception(
"Resource deployment loop completed, pending resource changes: %s" % changes
)
# clean up references to deleted resources in stack
deletes = [c for c in changes_done if c["ResourceChange"]["Action"] == "Remove"]
for delete in deletes:
stack.template["Resources"].pop(delete["ResourceChange"]["LogicalResourceId"], None)
return changes_done
def prepare_should_deploy_change(self, resource_id, change, stack, new_resources):
resource = new_resources[resource_id]
res_change = change["ResourceChange"]
action = res_change["Action"]
# check resource condition, if present
if not evaluate_resource_condition(resource, stack.stack_name, new_resources):
LOG.debug(
'Skipping deployment of "%s", as resource condition evaluates to false'
% resource_id
)
return
# resolve refs in resource details
resolve_refs_recursively(stack.stack_name, resource, new_resources)
if action in ["Add", "Modify"]:
is_deployed = self.is_deployed(resource)
if action == "Modify" and not is_deployed:
action = res_change["Action"] = "Add"
if action == "Add":
if not self.is_deployable_resource(resource) or is_deployed:
return False
if action == "Modify" and not self.is_updateable(resource):
LOG.debug(
'Action "update" not yet implemented for CF resource type %s'
% resource.get("Type")
)
return False
return True
def apply_change(self, change, old_stack, new_resources, stack_name):
change_details = change["ResourceChange"]
action = change_details["Action"]
resource_id = change_details["LogicalResourceId"]
resource = new_resources[resource_id]
if not evaluate_resource_condition(resource, stack_name, new_resources):
return
# execute resource action
if action == "Add":
result = deploy_resource(resource_id, new_resources, stack_name)
elif action == "Remove":
result = delete_resource(resource_id, old_stack.resources, stack_name)
elif action == "Modify":
result = update_resource(resource_id, new_resources, stack_name)
# update resource status and physical resource id
stack_action = get_action_name_for_resource_change(action)
self.update_resource_details(resource_id, result, stack=old_stack, action=stack_action)
return result
| 1 | 13,022 | FYI: I deployed a stack with multiple lengths for the stack name and resource id to cloudformation, to see how it truncates the parts for the final FunctionName | localstack-localstack | py |
@@ -301,7 +301,6 @@ func (w *WorkloadInitializer) newLoadBalancedWebServiceManifest(i *ServiceProps)
},
Port: i.Port,
HealthCheck: i.HealthCheck,
- AppDomain: i.appDomain,
Path: "/",
}
existingSvcs, err := w.Store.ListServices(i.App) | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package initialize contains methods and structs needed to initialize jobs and services.
package initialize
import (
"encoding"
"fmt"
"os"
"path/filepath"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/manifest"
"github.com/aws/copilot-cli/internal/pkg/term/color"
"github.com/aws/copilot-cli/internal/pkg/term/log"
"github.com/aws/copilot-cli/internal/pkg/workspace"
)
const (
fmtAddWlToAppStart = "Creating ECR repositories for %s %s."
fmtAddWlToAppFailed = "Failed to create ECR repositories for %s %s.\n\n"
fmtAddWlToAppComplete = "Created ECR repositories for %s %s.\n\n"
)
const (
jobWlType = "job"
svcWlType = "service"
)
var fmtErrUnrecognizedWlType = "unrecognized workload type %s"
// Store represents the methods needed to add workloads to the SSM parameter store.
type Store interface {
GetApplication(appName string) (*config.Application, error)
CreateService(service *config.Workload) error
CreateJob(job *config.Workload) error
ListServices(appName string) ([]*config.Workload, error)
ListJobs(appName string) ([]*config.Workload, error)
}
// WorkloadAdder contains the methods needed to add jobs and services to an existing application.
type WorkloadAdder interface {
AddJobToApp(app *config.Application, jobName string) error
AddServiceToApp(app *config.Application, serviceName string) error
}
// Workspace contains the methods needed to manipulate a Copilot workspace.
type Workspace interface {
CopilotDirPath() (string, error)
WriteJobManifest(marshaler encoding.BinaryMarshaler, jobName string) (string, error)
WriteServiceManifest(marshaler encoding.BinaryMarshaler, serviceName string) (string, error)
}
// Prog contains the methods needed to render multi-stage operations.
type Prog interface {
Start(label string)
Stop(label string)
}
// WorkloadProps contains the information needed to represent a Workload (job or service).
type WorkloadProps struct {
App string
Type string
Name string
DockerfilePath string
Image string
}
// JobProps contains the information needed to represent a Job.
type JobProps struct {
WorkloadProps
Schedule string
HealthCheck *manifest.ContainerHealthCheck
Timeout string
Retries int
}
// ServiceProps contains the information needed to represent a Service (port, HealthCheck, and workload common props).
type ServiceProps struct {
WorkloadProps
Port uint16
HealthCheck *manifest.ContainerHealthCheck
appDomain *string
}
// WorkloadInitializer holds the clients necessary to initialize either a
// service or job in an existing application.
type WorkloadInitializer struct {
Store Store
Deployer WorkloadAdder
Ws Workspace
Prog Prog
}
// Service writes the service manifest, creates an ECR repository, and adds the service to SSM.
func (w *WorkloadInitializer) Service(i *ServiceProps) (string, error) {
return w.initService(i)
}
// Job writes the job manifest, creates an ECR repository, and adds the job to SSM.
func (w *WorkloadInitializer) Job(i *JobProps) (string, error) {
return w.initJob(i)
}
func (w *WorkloadInitializer) addWlToApp(app *config.Application, wlName string, wlType string) error {
switch wlType {
case svcWlType:
return w.Deployer.AddServiceToApp(app, wlName)
case jobWlType:
return w.Deployer.AddJobToApp(app, wlName)
default:
return fmt.Errorf(fmtErrUnrecognizedWlType, wlType)
}
}
func (w *WorkloadInitializer) addWlToStore(wl *config.Workload, wlType string) error {
switch wlType {
case svcWlType:
return w.Store.CreateService(wl)
case jobWlType:
return w.Store.CreateJob(wl)
default:
return fmt.Errorf(fmtErrUnrecognizedWlType, wlType)
}
}
func (w *WorkloadInitializer) initJob(props *JobProps) (string, error) {
if props.DockerfilePath != "" {
path, err := relativeDockerfilePath(w.Ws, props.DockerfilePath)
if err != nil {
return "", err
}
props.DockerfilePath = path
}
var manifestExists bool
mf, err := newJobManifest(props)
if err != nil {
return "", err
}
manifestPath, err := w.Ws.WriteJobManifest(mf, props.Name)
if err != nil {
e, ok := err.(*workspace.ErrFileExists)
if !ok {
return "", fmt.Errorf("write %s manifest: %w", jobWlType, err)
}
manifestExists = true
manifestPath = e.FileName
}
manifestPath, err = relPath(manifestPath)
if err != nil {
return "", err
}
manifestMsgFmt := "Wrote the manifest for %s %s at %s\n"
if manifestExists {
manifestMsgFmt = "Manifest file for %s %s already exists at %s, skipping writing it.\n"
}
log.Successf(manifestMsgFmt, jobWlType, color.HighlightUserInput(props.Name), color.HighlightResource(manifestPath))
var sched = props.Schedule
if props.Schedule == "" {
sched = "None"
}
helpText := fmt.Sprintf("Your manifest contains configurations like your container size and job schedule (%s).", sched)
log.Infoln(color.Help(helpText))
log.Infoln()
app, err := w.Store.GetApplication(props.App)
if err != nil {
return "", fmt.Errorf("get application %s: %w", props.App, err)
}
err = w.addJobToAppAndSSM(app, props.WorkloadProps)
if err != nil {
return "", err
}
return manifestPath, nil
}
func (w *WorkloadInitializer) initService(props *ServiceProps) (string, error) {
if props.DockerfilePath != "" {
path, err := relativeDockerfilePath(w.Ws, props.DockerfilePath)
if err != nil {
return "", err
}
props.DockerfilePath = path
}
app, err := w.Store.GetApplication(props.App)
if err != nil {
return "", fmt.Errorf("get application %s: %w", props.App, err)
}
if app.Domain != "" {
props.appDomain = aws.String(app.Domain)
}
var manifestExists bool
mf, err := w.newServiceManifest(props)
if err != nil {
return "", err
}
manifestPath, err := w.Ws.WriteServiceManifest(mf, props.Name)
if err != nil {
e, ok := err.(*workspace.ErrFileExists)
if !ok {
return "", fmt.Errorf("write %s manifest: %w", svcWlType, err)
}
manifestExists = true
manifestPath = e.FileName
}
manifestPath, err = relPath(manifestPath)
if err != nil {
return "", err
}
manifestMsgFmt := "Wrote the manifest for %s %s at %s\n"
if manifestExists {
manifestMsgFmt = "Manifest file for %s %s already exists at %s, skipping writing it.\n"
}
log.Successf(manifestMsgFmt, svcWlType, color.HighlightUserInput(props.Name), color.HighlightResource(manifestPath))
helpText := "Your manifest contains configurations like your container size and port."
if props.Port != 0 {
helpText = fmt.Sprintf("Your manifest contains configurations like your container size and port (:%d).", props.Port)
}
log.Infoln(color.Help(helpText))
log.Infoln()
err = w.addSvcToAppAndSSM(app, props.WorkloadProps)
if err != nil {
return "", err
}
return manifestPath, nil
}
func (w *WorkloadInitializer) addSvcToAppAndSSM(app *config.Application, props WorkloadProps) error {
return w.addWlToAppAndSSM(app, props, svcWlType)
}
func (w *WorkloadInitializer) addJobToAppAndSSM(app *config.Application, props WorkloadProps) error {
return w.addWlToAppAndSSM(app, props, jobWlType)
}
func (w *WorkloadInitializer) addWlToAppAndSSM(app *config.Application, props WorkloadProps, wlType string) error {
w.Prog.Start(fmt.Sprintf(fmtAddWlToAppStart, wlType, props.Name))
if err := w.addWlToApp(app, props.Name, wlType); err != nil {
w.Prog.Stop(log.Serrorf(fmtAddWlToAppFailed, wlType, props.Name))
return fmt.Errorf("add %s %s to application %s: %w", wlType, props.Name, props.App, err)
}
w.Prog.Stop(log.Ssuccessf(fmtAddWlToAppComplete, wlType, props.Name))
if err := w.addWlToStore(&config.Workload{
App: props.App,
Name: props.Name,
Type: props.Type,
}, wlType); err != nil {
return fmt.Errorf("saving %s %s: %w", wlType, props.Name, err)
}
return nil
}
func newJobManifest(i *JobProps) (encoding.BinaryMarshaler, error) {
switch i.Type {
case manifest.ScheduledJobType:
return manifest.NewScheduledJob(&manifest.ScheduledJobProps{
WorkloadProps: &manifest.WorkloadProps{
Name: i.Name,
Dockerfile: i.DockerfilePath,
Image: i.Image,
},
HealthCheck: i.HealthCheck,
Schedule: i.Schedule,
Timeout: i.Timeout,
Retries: i.Retries,
}), nil
default:
return nil, fmt.Errorf("job type %s doesn't have a manifest", i.Type)
}
}
func (w *WorkloadInitializer) newServiceManifest(i *ServiceProps) (encoding.BinaryMarshaler, error) {
switch i.Type {
case manifest.LoadBalancedWebServiceType:
return w.newLoadBalancedWebServiceManifest(i)
case manifest.RequestDrivenWebServiceType:
return w.newRequestDrivenWebServiceManifest(i), nil
case manifest.BackendServiceType:
return newBackendServiceManifest(i)
default:
return nil, fmt.Errorf("service type %s doesn't have a manifest", i.Type)
}
}
func (w *WorkloadInitializer) newLoadBalancedWebServiceManifest(i *ServiceProps) (*manifest.LoadBalancedWebService, error) {
props := &manifest.LoadBalancedWebServiceProps{
WorkloadProps: &manifest.WorkloadProps{
Name: i.Name,
Dockerfile: i.DockerfilePath,
Image: i.Image,
},
Port: i.Port,
HealthCheck: i.HealthCheck,
AppDomain: i.appDomain,
Path: "/",
}
existingSvcs, err := w.Store.ListServices(i.App)
if err != nil {
return nil, err
}
// We default to "/" for the first service, but if there's another
// Load Balanced Web Service, we use the svc name as the default, instead.
for _, existingSvc := range existingSvcs {
if existingSvc.Type == manifest.LoadBalancedWebServiceType && existingSvc.Name != i.Name {
props.Path = i.Name
break
}
}
return manifest.NewLoadBalancedWebService(props), nil
}
func (w *WorkloadInitializer) newRequestDrivenWebServiceManifest(i *ServiceProps) *manifest.RequestDrivenWebService {
props := &manifest.RequestDrivenWebServiceProps{
WorkloadProps: &manifest.WorkloadProps{
Name: i.Name,
Dockerfile: i.DockerfilePath,
Image: i.Image,
},
Port: i.Port,
}
return manifest.NewRequestDrivenWebService(props)
}
func newBackendServiceManifest(i *ServiceProps) (*manifest.BackendService, error) {
return manifest.NewBackendService(manifest.BackendServiceProps{
WorkloadProps: manifest.WorkloadProps{
Name: i.Name,
Dockerfile: i.DockerfilePath,
Image: i.Image,
},
Port: i.Port,
HealthCheck: i.HealthCheck,
}), nil
}
// relativeDockerfilePath returns the path from the workspace root to the Dockerfile.
func relativeDockerfilePath(ws Workspace, path string) (string, error) {
copilotDirPath, err := ws.CopilotDirPath()
if err != nil {
return "", fmt.Errorf("get copilot directory: %w", err)
}
wsRoot := filepath.Dir(copilotDirPath)
absDfPath, err := filepath.Abs(path)
if err != nil {
return "", fmt.Errorf("get absolute path: %v", err)
}
relDfPath, err := filepath.Rel(wsRoot, absDfPath)
if err != nil {
return "", fmt.Errorf("find relative path from workspace root to Dockerfile: %v", err)
}
return relDfPath, nil
}
// relPath returns the path relative to the current working directory.
func relPath(fullPath string) (string, error) {
wkdir, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("get working directory: %w", err)
}
path, err := filepath.Rel(wkdir, fullPath)
if err != nil {
return "", fmt.Errorf("get relative path of file: %w", err)
}
return path, nil
}
| 1 | 18,043 | Did we not use this field @_@ | aws-copilot-cli | go |
@@ -83,6 +83,10 @@ const Config = function () {
this.extraGnArgs = {}
this.extraNinjaOpts = []
this.braveSafetyNetApiKey = getNPMConfig(['brave_safetynet_api_key']) || ''
+ this.androidKeystorePath = getNPMConfig(['android_keystore_path']) || ''
+ this.androidKeyAlias = getNPMConfig(['android_key_alias']) || ''
+ this.androidKeystorePassword = getNPMConfig(['android_keystore_password']) || ''
+ this.androidKeyPassword = getNPMConfig(['android_key_password']) || ''
}
Config.prototype.buildArgs = function () { | 1 | 'use strict'
const path = require('path')
const fs = require('fs')
const assert = require('assert')
const packages = require('../package')
const getNPMConfig = (path) => {
const key = path.join('_').replace('-', '_')
const npm_prefix = 'npm_config_'
const package_config_prefix = 'npm_package_config_'
const package_prefix = 'npm_package_'
return process.env[npm_prefix + key] ||
process.env[package_config_prefix + key] ||
process.env[package_prefix + key]
}
const parseExtraInputs = (inputs, accumulator, callback) => {
for (let input of inputs) {
let separatorIndex = input.indexOf(':')
if (separatorIndex < 0) {
separatorIndex = input.length
}
const key = input.substring(0, separatorIndex);
const value = input.substring(separatorIndex + 1);
callback(accumulator, key, value)
}
}
const Config = function () {
this.defaultBuildConfig = 'Debug'
this.buildConfig = this.defaultBuildConfig
this.projectNames = []
this.projects = {}
this.signTarget = 'sign_app'
this.buildTarget = 'brave'
this.rootDir = path.join(path.dirname(__filename), '..')
this.scriptDir = path.join(this.rootDir, 'scripts')
this.depotToolsDir = path.join(this.rootDir, 'vendor', 'depot_tools')
this.srcDir = path.join(this.rootDir, getNPMConfig(['projects', 'chrome', 'dir']))
this.buildToolsDir = path.join(this.srcDir, 'build')
this.resourcesDir = path.join(this.rootDir, 'resources')
this.defaultGClientFile = path.join(this.rootDir, '.gclient')
this.gClientFile = process.env.BRAVE_GCLIENT_FILE || this.defaultGClientFile
this.gClientVerbose = getNPMConfig(['gclient_verbose']) || false
this.targetArch = getNPMConfig(['target_arch']) || 'x64'
this.targetOS = getNPMConfig(['target_os'])
this.gypTargetArch = 'x64'
this.targetApkBase ='classic'
this.androidOverrideVersionName = '0.0.0'
this.officialBuild = true
this.debugBuild = JSON.parse(getNPMConfig(['brave_debug_build']) || false)
this.braveGoogleApiKey = getNPMConfig(['brave_google_api_key']) || 'AIzaSyAQfxPJiounkhOjODEO5ZieffeBv6yft2Q'
this.googleApiKey = getNPMConfig(['google_api_key']) || 'AIzaSyAH90V94EcZBP5oH7oc-mXQrSKgASVxER8'
this.googleApiEndpoint = getNPMConfig(['brave_google_api_endpoint']) || 'https://www.googleapis.com/geolocation/v1/geolocate?key='
this.braveServicesKey = getNPMConfig(['brave_services_key']) || ''
this.infuraProjectId = getNPMConfig(['brave_infura_project_id']) || ''
this.safeBrowsingApiEndpoint = getNPMConfig(['safe_browsing_api_endpoint']) || 'safebrowsing.brave.com'
this.webcompatReportApiEndpoint = getNPMConfig(['webcompat_report_api_endpoint']) || 'https://webcompat.brave.com/1/webcompat'
this.buildProjects()
this.braveVersion = getNPMConfig(['version']) || '0.0.0.0'
this.chromeVersion = getNPMConfig(['projects', 'chrome', 'tag']) || '0.0.0.0'
this.releaseTag = this.braveVersion.split('+')[0]
this.mac_signing_identifier = getNPMConfig(['mac_signing_identifier']) || ''
this.mac_installer_signing_identifier = getNPMConfig(['mac_installer_signing_identifier']) || ''
this.mac_signing_keychain = getNPMConfig(['mac_signing_keychain']) || 'login'
this.mac_signing_output_prefix = 'signing'
this.notary_user = getNPMConfig(['notary_user']) || ''
this.notary_password = getNPMConfig(['notary_password']) || ''
this.channel = ''
this.sccache = getNPMConfig(['sccache'])
this.braveReferralsApiKey = getNPMConfig(['brave_referrals_api_key']) || ''
this.ignore_compile_failure = false
this.enable_hangout_services_extension = true
this.widevineVersion = getNPMConfig(['widevine', 'version'])
this.brave_enable_cdm_host_verification = false
this.sign_widevine_cert = process.env.SIGN_WIDEVINE_CERT || ''
this.sign_widevine_key = process.env.SIGN_WIDEVINE_KEY || ''
this.sign_widevine_passwd = process.env.SIGN_WIDEVINE_PASSPHRASE || ''
this.signature_generator = path.join(this.srcDir, 'third_party', 'widevine', 'scripts', 'signature_generator.py') || ''
this.extraGnArgs = {}
this.extraNinjaOpts = []
this.braveSafetyNetApiKey = getNPMConfig(['brave_safetynet_api_key']) || ''
}
Config.prototype.buildArgs = function () {
const version = this.braveVersion
let version_parts = version.split('+')[0]
version_parts = version_parts.split('.')
const chrome_version_parts = this.chromeVersion.split('.')
let args = {
fieldtrial_testing_like_official_build: true,
safe_browsing_mode: 1,
brave_services_key: this.braveServicesKey,
root_extra_deps: ["//brave"],
// TODO: Re-enable when chromium_src overrides work for files in relative
// paths like widevine_cdm_component_installer.cc
// use_jumbo_build: !this.officialBuild,
is_component_build: this.buildConfig !== 'Release',
proprietary_codecs: true,
ffmpeg_branding: "Chrome",
enable_nacl: false,
// branding_path_component: "brave",
enable_widevine: true,
target_cpu: this.targetArch,
target_apk_base: this.targetApkBase,
android_override_version_name: this.androidOverrideVersionName,
is_official_build: this.officialBuild,
is_debug: this.buildConfig !== 'Release',
dcheck_always_on: !this.officialBuild,
brave_channel: this.channel,
google_api_key: this.googleApiKey,
brave_google_api_key: this.braveGoogleApiKey,
brave_google_api_endpoint: this.googleApiEndpoint,
brave_infura_project_id: this.infuraProjectId,
brave_product_name: getNPMConfig(['brave_product_name']) || "brave-core",
brave_project_name: getNPMConfig(['brave_project_name']) || "brave-core",
brave_version_major: version_parts[0],
brave_version_minor: version_parts[1],
brave_version_build: version_parts[2],
chrome_version_string: this.chromeVersion,
chrome_version_major: chrome_version_parts[0],
safebrowsing_api_endpoint: this.safeBrowsingApiEndpoint,
webcompat_report_api_endpoint: this.webcompatReportApiEndpoint,
brave_referrals_api_key: this.braveReferralsApiKey,
enable_hangout_services_extension: this.enable_hangout_services_extension,
enable_cdm_host_verification: this.brave_enable_cdm_host_verification,
...this.extraGnArgs,
}
if (process.platform === 'darwin' && this.targetOS !== 'ios') {
args.mac_signing_identifier = this.mac_signing_identifier
args.mac_installer_signing_identifier = this.mac_installer_signing_identifier
args.mac_signing_keychain = this.mac_signing_keychain
args.mac_signing_output_prefix = this.mac_signing_output_prefix
if (this.notarize) {
args.notarize = true
args.notary_user = this.notary_user
args.notary_password = this.notary_password
}
}
if (process.platform === 'win32' && this.build_omaha) {
args.build_omaha = this.build_omaha
args.tag_ap = this.tag_ap
}
if (this.skip_signing) {
args.skip_signing = true
}
if (this.debugBuild && this.targetOS !== 'ios' &&
this.targetOS !== 'android') {
if (process.platform === 'darwin') {
args.enable_stripping = false
}
args.symbol_level = 2
args.enable_profiling = true
args.is_win_fastlink = true
}
if (this.sccache && process.platform === 'win32') {
args.clang_use_chrome_plugins = false
args.enable_precompiled_headers = false
args.use_thin_lto = true
}
if (this.targetArch === 'x86' && process.platform === 'linux') {
// Minimal symbols for target Linux x86, because ELF32 cannot be > 4GiB
args.symbol_level = 1
}
if (this.targetArch === 'x64' && process.platform === 'linux' &&
this.targetOS !== 'android') {
// Include vaapi support
args.use_vaapi = true
}
if (this.targetOS === 'android') {
args.target_os = 'android'
if (!this.officialBuild) {
args.chrome_public_manifest_package = 'com.brave.browser_default'
} else if (this.channel === '') {
args.chrome_public_manifest_package = 'com.brave.browser'
} else if (this.channel === 'beta') {
args.chrome_public_manifest_package = 'com.brave.browser_beta'
} else if (this.channel === 'dev') {
args.chrome_public_manifest_package = 'com.brave.browser_dev'
} else if (this.channel === 'nightly') {
args.chrome_public_manifest_package = 'com.brave.browser_nightly'
}
args.brave_safetynet_api_key = this.braveSafetyNetApiKey
args.enable_widevine = false
// TODO(fixme)
args.enable_tor = false
args.enable_brave_sync = false
// These do not exist on android
// TODO - recheck
delete args.safe_browsing_mode
delete args.proprietary_codecs
delete args.ffmpeg_branding
delete args.enable_nacl
delete args.branding_path_component
delete args.enable_hangout_services_extension
delete args.brave_infura_project_id
} else {
// This does not exist on non-Android platforms
delete args.android_override_version_name
}
if (this.targetOS === 'ios') {
args.target_os = 'ios'
args.enable_dsyms = false
args.enable_stripping = args.enable_dsyms
args.use_xcode_clang = args.is_official_build
args.use_clang_coverage = false
args.is_component_build = false
args.ios_deployment_target = '12.0'
args.ios_enable_code_signing = false
delete args.safebrowsing_api_endpoint
delete args.safe_browsing_mode
delete args.proprietary_codecs
delete args.ffmpeg_branding
delete args.enable_nacl
delete args.branding_path_component
delete args.enable_widevine
delete args.enable_hangout_services_extension
delete args.brave_google_api_endpoint
delete args.brave_google_api_key
delete args.brave_referrals_api_key
delete args.brave_infura_project_id
}
if (process.platform === 'win32') {
args.cc_wrapper = path.join(this.srcDir, 'brave', 'script', 'redirect-cc.cmd')
} else {
args.cc_wrapper = path.join(this.srcDir, 'brave', 'script', 'redirect-cc.py')
}
return args
}
Config.prototype.shouldSign = function () {
// it doesn't make sense to sign debug builds because the restrictions on loading
// dynamic libs prevents them from working anyway
return this.mac_signing_identifier !== '' &&
!this.skip_signing &&
this.buildConfig === 'Release' &&
this.targetOS !== 'ios'
}
Config.prototype.prependPath = function (oldPath, addPath) {
let newPath = oldPath.split(path.delimiter)
newPath.unshift(addPath)
newPath = newPath.join(path.delimiter)
return newPath
}
Config.prototype.appendPath = function (oldPath, addPath) {
let newPath = oldPath.split(path.delimiter)
newPath.push(addPath)
newPath = newPath.join(path.delimiter)
return newPath
}
Config.prototype.addPathToEnv = function (env, addPath, prepend = false) {
// cmd.exe uses Path instead of PATH so just set both
const addToPath = prepend ? this.prependPath : this.appendPath
env.Path && (env.Path = addToPath(env.Path, addPath))
env.PATH && (env.PATH = addToPath(env.PATH, addPath))
return env
}
Config.prototype.addPythonPathToEnv = function (env, addPath) {
env.PYTHONPATH = this.appendPath(env.PYTHONPATH || '', addPath)
return env
}
const getProjectVersion = function (projectName) {
return getNPMConfig(['projects', projectName, 'tag']) || getNPMConfig(['projects', projectName, 'branch'])
}
Config.prototype.getProjectRef = function (projectName) {
const ref = getNPMConfig(['projects', projectName, 'repository', 'ref'])
if (ref) {
return ref
}
const tag = getNPMConfig(['projects', projectName, 'tag'])
if (tag) {
return 'refs/tags/' + tag
}
const commit = getNPMConfig(['projects', projectName, 'commit'])
if (commit) {
return commit
}
let version = getNPMConfig(['projects', projectName, 'version'])
let branch = getNPMConfig(['projects', projectName, 'branch'])
if (!branch && !version) {
return 'origin/master'
}
if (!version) {
return `origin/${branch}`
}
branch = `origin/${version}`
if (projectName === 'brave-core') {
const chromeVersion = getProjectVersion('chrome')
if (chromeVersion) {
branch = `${branch}+${chromeVersion}`
}
}
return branch
}
Config.prototype.buildProjects = function () {
for (let name in packages.config.projects) {
this.projectNames.push(name)
}
this.projectNames.forEach((projectName) => {
this.projects[projectName] = {
ref: this.getProjectRef(projectName),
url: getNPMConfig(['projects', projectName, 'repository', 'url']),
gclientName: getNPMConfig(['projects', projectName, 'dir']),
dir: path.join(this.rootDir, getNPMConfig(['projects', projectName, 'dir'])),
custom_deps: packages.config.projects[projectName].custom_deps,
arg_name: projectName.replace('-', '_')
}
})
}
Config.prototype.update = function (options) {
if (options.target_arch === 'x86') {
this.targetArch = options.target_arch
this.gypTargetArch = 'ia32'
} else if (options.target_arch === 'ia32') {
this.targetArch = 'x86'
this.gypTargetArch = options.target_arch
} else if (options.target_arch) {
this.targetArch = options.target_arch
}
if (options.target_os === 'android') {
this.targetOS = 'android'
if (options.target_apk_base) {
this.targetApkBase = options.target_apk_base
}
if (options.android_override_version_name) {
this.androidOverrideVersionName = options.android_override_version_name
}
}
if (options.target_os) {
this.targetOS = options.target_os
}
if (options.C) {
this.buildConfig = path.basename(options.C)
this.__outputDir = options.C
}
if (options.gclient_file && options.gclient_file !== 'default') {
this.gClientFile = options.gclient_file
}
if (options.brave_google_api_key) {
this.braveGoogleApiKey = options.brave_google_api_key
}
if (options.brave_safetynet_api_key) {
this.braveSafetyNetApiKey = options.brave_safetynet_api_key;
}
if (options.brave_google_api_endpoint) {
this.googleApiEndpoint = options.brave_google_api_endpoint
}
if (options.brave_infura_project_id) {
this.infuraProjectId = options.infura_project_id
}
if (options.safebrowsing_api_endpoint) {
this.safeBrowsingApiEndpoint = options.safebrowsing_api_endpoint
}
if (options.webcompat_report_api_endpoint) {
this.webcompatReportApiEndpoint = options.webcompat_report_api_endpoint
}
if (options.brave_referrals_api_key) {
this.braveReferralsApiKey = options.brave_referrals_api_key
}
if (options.debug_build !== null && options.debug_build !== undefined) {
this.debugBuild = JSON.parse(options.debug_build)
} else {
this.debugBuild = this.buildConfig !== 'Release'
}
if (options.official_build !== null && options.official_build !== undefined) {
this.officialBuild = JSON.parse(options.official_build)
if (this.officialBuild) {
this.debugBuild = false
}
} else {
this.officialBuild = this.buildConfig === 'Release'
}
if (!this.officialBuild) {
this.channel = 'development'
} else if (options.channel !== 'release') {
// In chromium src, empty string represents stable channel.
this.channel = options.channel
}
if (this.buildConfig === 'Release' && process.platform !== 'linux') {
this.brave_enable_cdm_host_verification =
this.sign_widevine_cert !== "" && this.sign_widevine_key !== "" &&
this.sign_widevine_passwd !== "" && fs.existsSync(this.signature_generator)
if (this.brave_enable_cdm_host_verification) {
console.log('Widevine cdm host verification is enabled')
} else {
console.log('Widevine cdm host verification is disabled')
}
}
if (process.platform === 'win32' && options.build_omaha) {
this.build_omaha = true
this.tag_ap = options.tag_ap
}
if (options.skip_signing) {
this.skip_signing = true
}
if (options.mac_signing_identifier)
this.mac_signing_identifier = options.mac_signing_identifier
if (options.mac_installer_signing_identifier)
this.mac_installer_signing_identifier = options.mac_installer_signing_identifier
if (options.mac_signing_keychain)
this.mac_signing_keychain = options.mac_signing_keychain
if (options.notarize)
this.notarize = true
if (options.gclient_verbose)
this.gClientVerbose = options.gclient_verbose
if (options.ignore_compile_failure)
this.ignore_compile_failure = true
if (options.xcode_gen) {
assert(process.platform === 'darwin' || options.target_os === 'ios')
if (options.xcode_gen === 'ios') {
this.xcode_gen_target = '//brave/vendor/brave-ios:*'
} else {
this.xcode_gen_target = options.xcode_gen
}
}
if (options.gn) {
parseExtraInputs(options.gn, this.extraGnArgs, (args, key, value) => {
try {
value = JSON.parse(value)
} catch (e) {
// On parse error, leave value as string.
}
args[key] = value
})
}
if (options.ninja) {
parseExtraInputs(options.ninja, this.extraNinjaOpts, (opts, key, value) => {
opts.push(`-${key}`)
opts.push(value)
})
}
this.projectNames.forEach((projectName) => {
// don't update refs for projects that have them
let project = this.projects[projectName]
if (!project.ref)
return
let ref = options[project.arg_name + '_ref']
if (ref && ref !== 'default' && ref !== '') {
project.ref = ref
}
})
}
Object.defineProperty(Config.prototype, 'defaultOptions', {
get: function () {
let env = Object.assign({}, process.env)
env = this.addPathToEnv(env, this.depotToolsDir, true)
env = this.addPythonPathToEnv(env, path.join(this.srcDir, 'brave', 'chromium_src', 'python_modules'))
env = this.addPythonPathToEnv(env, path.join(this.srcDir, 'brave', 'script'))
env = this.addPythonPathToEnv(env, path.join(this.srcDir, 'tools', 'grit', 'grit', 'extern'))
env = this.addPythonPathToEnv(env, path.join(this.srcDir, 'brave', 'vendor', 'requests'))
env = this.addPythonPathToEnv(env, path.join(this.srcDir, 'build'))
env.GCLIENT_FILE = this.gClientFile
env.DEPOT_TOOLS_WIN_TOOLCHAIN = '0'
env.PYTHONUNBUFFERED = '1'
env.TARGET_ARCH = this.gypTargetArch // for brave scripts
env.GYP_MSVS_VERSION = env.GYP_MSVS_VERSION || '2017' // enable 2017
if (this.sccache) {
env.CC_WRAPPER = this.sccache
if (path.basename(this.sccache) === 'ccache') {
console.log('using ccache')
env.CCACHE_CPP2 = 'yes'
env.CCACHE_SLOPPINESS = 'pch_defines,time_macros,include_file_mtime'
env.CCACHE_BASEDIR = this.srcDir
env = this.addPathToEnv(env, path.join(this.srcDir, 'third_party', 'llvm-build', 'Release+Asserts', 'bin'))
} else {
console.log('using sccache')
}
}
if (process.platform === 'linux') {
env.LLVM_DOWNLOAD_GOLD_PLUGIN = '1'
}
return {
env,
stdio: 'inherit',
cwd: this.srcDir,
shell: true,
git_cwd: '.',
}
},
})
Object.defineProperty(Config.prototype, 'component', {
get: function () { return this.__component || (this.buildConfig === 'Release' ? 'static_library' : 'shared_library') },
set: function (component) { return this.__component = component },
})
Object.defineProperty(Config.prototype, 'outputDir', {
get: function () {
if (this.__outputDir)
return this.__outputDir
let baseDir = path.join(this.srcDir, 'out')
let buildConfigDir = this.buildConfig
if (this.targetArch && this.targetArch != 'x64') {
buildConfigDir = buildConfigDir + '_' + this.targetArch
}
if (this.targetOS) {
buildConfigDir = this.targetOS + "_" + buildConfigDir
}
return path.join(baseDir, buildConfigDir)
},
set: function (outputDir) { return this.__outputDir = outputDir },
})
module.exports = new Config
| 1 | 6,302 | shouldn't this be `android_keystore_name`? | brave-brave-browser | js |
@@ -39,7 +39,12 @@ func (c *controller) issuersForSecret(secret *corev1.Secret) ([]*v1alpha2.Cluste
}
if (iss.Spec.ACME != nil && iss.Spec.ACME.PrivateKey.Name == secret.Name) ||
(iss.Spec.CA != nil && iss.Spec.CA.SecretName == secret.Name) ||
- (iss.Spec.Vault != nil && iss.Spec.Vault.Auth.TokenSecretRef.Name == secret.Name) {
+ (iss.Spec.Vault != nil && iss.Spec.Vault.Auth.TokenSecretRef != nil &&
+ iss.Spec.Vault.Auth.TokenSecretRef.Name == secret.Name) ||
+ (iss.Spec.Vault != nil && iss.Spec.Vault.Auth.Kubernetes != nil &&
+ iss.Spec.Vault.Auth.Kubernetes.SecretRef.Name == secret.Name) ||
+ (iss.Spec.Vault != nil && iss.Spec.Vault.Auth.AppRole != nil &&
+ iss.Spec.Vault.Auth.AppRole.SecretRef.Name == secret.Name) {
affected = append(affected, iss)
continue
} | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterissuers
import (
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2"
)
func (c *controller) issuersForSecret(secret *corev1.Secret) ([]*v1alpha2.ClusterIssuer, error) {
issuers, err := c.clusterIssuerLister.List(labels.NewSelector())
if err != nil {
return nil, fmt.Errorf("error listing certificiates: %s", err.Error())
}
var affected []*v1alpha2.ClusterIssuer
for _, iss := range issuers {
if secret.Namespace != c.clusterResourceNamespace {
continue
}
if (iss.Spec.ACME != nil && iss.Spec.ACME.PrivateKey.Name == secret.Name) ||
(iss.Spec.CA != nil && iss.Spec.CA.SecretName == secret.Name) ||
(iss.Spec.Vault != nil && iss.Spec.Vault.Auth.TokenSecretRef.Name == secret.Name) {
affected = append(affected, iss)
continue
}
}
return affected, nil
}
| 1 | 19,782 | Double check the issuers package too :) | jetstack-cert-manager | go |
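The review note above asks to double-check the namespaced issuers package for the same gap. As a rough sketch only (the helper name and the trimmed-down types are stand-ins for illustration, not the real cert-manager API), the nil-safe condition from the diff could be factored into one reusable predicate:

package main

import "fmt"

// Minimal stand-ins for the Vault issuer fields touched by the diff above.
type SecretRef struct{ Name string }

type VaultKubernetesAuth struct{ SecretRef SecretRef }

type VaultAppRoleAuth struct{ SecretRef SecretRef }

type VaultAuth struct {
	TokenSecretRef *SecretRef
	Kubernetes     *VaultKubernetesAuth
	AppRole        *VaultAppRoleAuth
}

type VaultIssuer struct{ Auth VaultAuth }

// vaultAuthUsesSecret reports whether any configured Vault auth method
// references the given secret name, guarding each optional block against nil.
func vaultAuthUsesSecret(vault *VaultIssuer, secretName string) bool {
	if vault == nil {
		return false
	}
	auth := vault.Auth
	switch {
	case auth.TokenSecretRef != nil && auth.TokenSecretRef.Name == secretName:
		return true
	case auth.Kubernetes != nil && auth.Kubernetes.SecretRef.Name == secretName:
		return true
	case auth.AppRole != nil && auth.AppRole.SecretRef.Name == secretName:
		return true
	}
	return false
}

func main() {
	iss := &VaultIssuer{Auth: VaultAuth{TokenSecretRef: &SecretRef{Name: "vault-token"}}}
	fmt.Println(vaultAuthUsesSecret(iss, "vault-token")) // true
	fmt.Println(vaultAuthUsesSecret(iss, "ca-secret"))   // false
}

A shared predicate like this could back both the ClusterIssuer check shown in the diff and its namespaced counterpart, so the two controllers cannot drift apart again.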
@@ -170,7 +170,7 @@ TYPING_NAMES = frozenset(
class VariableVisitConsumerAction(Enum):
- """Used after _visit_consumer to determine the action to be taken
+ """Used after _check_consumer to determine the action to be taken
Continue -> continue loop to next consumer
Return -> return and thereby break the loop | 1 | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2011-2014, 2017 Google, Inc.
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Michal Nowikowski <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Ricardo Gemignani <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Simu Toni <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <[email protected]>
# Copyright (c) 2016, 2018 Jakub Wilk <[email protected]>
# Copyright (c) 2016-2017 Derek Gustafson <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016 Grant Welch <[email protected]>
# Copyright (c) 2017-2018, 2021 Ville Skyttä <[email protected]>
# Copyright (c) 2017-2018, 2020 hippo91 <[email protected]>
# Copyright (c) 2017 Dan Garrette <[email protected]>
# Copyright (c) 2018-2019 Jim Robertson <[email protected]>
# Copyright (c) 2018 Mike Miller <[email protected]>
# Copyright (c) 2018 Lucas Cimon <[email protected]>
# Copyright (c) 2018 Drew <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Mike Frysinger <[email protected]>
# Copyright (c) 2018 Marianna Polatoglou <[email protected]>
# Copyright (c) 2018 mar-chi-pan <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019, 2021 Nick Drozd <[email protected]>
# Copyright (c) 2019 Djailla <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2020 Andrew Simmons <[email protected]>
# Copyright (c) 2020 Andrew Simmons <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2020 Ashley Whetter <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Tushar Sadhwani <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 bot <[email protected]>
# Copyright (c) 2021 David Liu <[email protected]>
# Copyright (c) 2021 kasium <[email protected]>
# Copyright (c) 2021 Marcin Kurczewski <[email protected]>
# Copyright (c) 2021 Sergei Lebedev <[email protected]>
# Copyright (c) 2021 Lorena B <[email protected]>
# Copyright (c) 2021 haasea <[email protected]>
# Copyright (c) 2021 Alexander Kapshuna <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Variables checkers for Python code"""
import collections
import copy
import itertools
import os
import re
import sys
from enum import Enum
from functools import lru_cache
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
NamedTuple,
Optional,
Set,
Tuple,
Union,
)
import astroid
from astroid import nodes
from pylint.checkers import BaseChecker, utils
from pylint.checkers.utils import is_postponed_evaluation_enabled
from pylint.constants import PY39_PLUS
from pylint.interfaces import (
CONTROL_FLOW,
HIGH,
INFERENCE,
INFERENCE_FAILURE,
IAstroidChecker,
)
from pylint.utils import get_global_option
if TYPE_CHECKING:
from pylint.lint import PyLinter
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
FUTURE = "__future__"
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile("_.*|^ignored_|^unused_")
# In Python 3.7 abc has a Python implementation which is preferred
# by astroid. Unfortunately this also messes up our explicit checks
# for `abc`
METACLASS_NAME_TRANSFORMS = {"_py_abc": "abc"}
TYPING_TYPE_CHECKS_GUARDS = frozenset({"typing.TYPE_CHECKING", "TYPE_CHECKING"})
BUILTIN_RANGE = "builtins.range"
TYPING_MODULE = "typing"
TYPING_NAMES = frozenset(
{
"Any",
"Callable",
"ClassVar",
"Generic",
"Optional",
"Tuple",
"Type",
"TypeVar",
"Union",
"AbstractSet",
"ByteString",
"Container",
"ContextManager",
"Hashable",
"ItemsView",
"Iterable",
"Iterator",
"KeysView",
"Mapping",
"MappingView",
"MutableMapping",
"MutableSequence",
"MutableSet",
"Sequence",
"Sized",
"ValuesView",
"Awaitable",
"AsyncIterator",
"AsyncIterable",
"Coroutine",
"Collection",
"AsyncGenerator",
"AsyncContextManager",
"Reversible",
"SupportsAbs",
"SupportsBytes",
"SupportsComplex",
"SupportsFloat",
"SupportsInt",
"SupportsRound",
"Counter",
"Deque",
"Dict",
"DefaultDict",
"List",
"Set",
"FrozenSet",
"NamedTuple",
"Generator",
"AnyStr",
"Text",
"Pattern",
"BinaryIO",
}
)
class VariableVisitConsumerAction(Enum):
"""Used after _visit_consumer to determine the action to be taken
Continue -> continue loop to next consumer
Return -> return and thereby break the loop
Consume -> consume the found nodes (second return value) and return
"""
CONTINUE = 0
RETURN = 1
CONSUME = 2
def _is_from_future_import(stmt, name):
"""Check if the name is a future import from another module."""
try:
module = stmt.do_import_module(stmt.modname)
except astroid.AstroidBuildingException:
return None
for local_node in module.locals.get(name, []):
if isinstance(local_node, nodes.ImportFrom) and local_node.modname == FUTURE:
return True
return None
def in_for_else_branch(parent, stmt):
"""Returns True if stmt in inside the else branch for a parent For stmt."""
return isinstance(parent, nodes.For) and any(
else_stmt.parent_of(stmt) or else_stmt == stmt for else_stmt in parent.orelse
)
@lru_cache(maxsize=1000)
def overridden_method(klass, name):
"""get overridden method if any"""
try:
parent = next(klass.local_attr_ancestors(name))
except (StopIteration, KeyError):
return None
try:
meth_node = parent[name]
except KeyError:
# We have found an ancestor defining <name> but it's not in the local
# dictionary. This may happen with astroid built from living objects.
return None
if isinstance(meth_node, nodes.FunctionDef):
return meth_node
return None
def _get_unpacking_extra_info(node, inferred):
"""return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple-unpacking errors
"""
more = ""
inferred_module = inferred.root().name
if node.root().name == inferred_module:
if node.lineno == inferred.lineno:
more = f" {inferred.as_string()}"
elif inferred.lineno:
more = f" defined at line {inferred.lineno}"
elif inferred.lineno:
more = f" defined at line {inferred.lineno} of {inferred_module}"
return more
def _detect_global_scope(node, frame, defframe):
"""Detect that the given frames shares a global
scope.
Two frames shares a global scope when neither
of them are hidden under a function scope, as well
as any of parent scope of them, until the root scope.
In this case, depending from something defined later on
will not work, because it is still undefined.
Example:
class A:
# B has the same global scope as `C`, leading to a NameError.
class B(C): ...
class C: ...
"""
def_scope = scope = None
if frame and frame.parent:
scope = frame.parent.scope()
if defframe and defframe.parent:
def_scope = defframe.parent.scope()
if isinstance(frame, nodes.FunctionDef):
# If the parent of the current node is a
# function, then it can be under its scope
# (defined in, which doesn't concern us) or
# the `->` part of annotations. The same goes
# for annotations of function arguments, they'll have
# their parent the Arguments node.
if not isinstance(node.parent, (nodes.FunctionDef, nodes.Arguments)):
return False
elif any(
not isinstance(f, (nodes.ClassDef, nodes.Module)) for f in (frame, defframe)
):
# Not interested in other frames, since they are already
# not in a global scope.
return False
break_scopes = []
for current_scope in (scope, def_scope):
# Look for parent scopes. If there is anything different
# than a module or a class scope, then the frames don't
# share a global scope.
parent_scope = current_scope
while parent_scope:
if not isinstance(parent_scope, (nodes.ClassDef, nodes.Module)):
break_scopes.append(parent_scope)
break
if parent_scope.parent:
parent_scope = parent_scope.parent.scope()
else:
break
if break_scopes and len(set(break_scopes)) != 1:
# Store different scopes than expected.
# If the stored scopes are, in fact, the very same, then it means
# that the two frames (frame and defframe) share the same scope,
# and we could apply our lineno analysis over them.
# For instance, this works when they are inside a function, the node
# that uses a definition and the definition itself.
return False
# At this point, we are certain that frame and defframe shares a scope
# and the definition of the first depends on the second.
return frame.lineno < defframe.lineno
def _infer_name_module(node, name):
context = astroid.context.InferenceContext()
context.lookupname = name
return node.infer(context, asname=False)
def _fix_dot_imports(not_consumed):
"""Try to fix imports with multiple dots, by returning a dictionary
with the import names expanded. The function unflattens root imports,
like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
and 'xml.sax' respectively.
"""
names = {}
for name, stmts in not_consumed.items():
if any(
isinstance(stmt, nodes.AssignName)
and isinstance(stmt.assign_type(), nodes.AugAssign)
for stmt in stmts
):
continue
for stmt in stmts:
if not isinstance(stmt, (nodes.ImportFrom, nodes.Import)):
continue
for imports in stmt.names:
second_name = None
import_module_name = imports[0]
if import_module_name == "*":
# In case of wildcard imports,
# pick the name from inside the imported module.
second_name = name
else:
name_matches_dotted_import = False
if (
import_module_name.startswith(name)
and import_module_name.find(".") > -1
):
name_matches_dotted_import = True
if name_matches_dotted_import or name in imports:
# Most likely something like 'xml.etree',
# which will appear in the .locals as 'xml'.
# Only pick the name if it wasn't consumed.
second_name = import_module_name
if second_name and second_name not in names:
names[second_name] = stmt
return sorted(names.items(), key=lambda a: a[1].fromlineno)
def _find_frame_imports(name, frame):
"""Detect imports in the frame, with the required
*name*. Such imports can be considered assignments.
Returns True if an import for the given name was found.
"""
imports = frame.nodes_of_class((nodes.Import, nodes.ImportFrom))
for import_node in imports:
for import_name, import_alias in import_node.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias == name:
return True
elif import_name and import_name == name:
return True
return None
def _import_name_is_global(stmt, global_names):
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(iterator):
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
def _assigned_locally(name_node):
"""Checks if name_node has corresponding assign statement in same scope"""
assign_stmts = name_node.scope().nodes_of_class(nodes.AssignName)
return any(a.name == name_node.name for a in assign_stmts)
def _is_type_checking_import(node: Union[nodes.Import, nodes.ImportFrom]) -> bool:
"""Check if an import node is guarded by a TYPE_CHECKS guard"""
return any(
isinstance(ancestor, nodes.If)
and ancestor.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
for ancestor in node.node_ancestors()
)
def _has_locals_call_after_node(stmt, scope):
skip_nodes = (
nodes.FunctionDef,
nodes.ClassDef,
nodes.Import,
nodes.ImportFrom,
)
for call in scope.nodes_of_class(nodes.Call, skip_klass=skip_nodes):
inferred = utils.safe_infer(call.func)
if (
utils.is_builtin_object(inferred)
and getattr(inferred, "name", None) == "locals"
):
if stmt.lineno < call.lineno:
return True
return False
MSGS = {
"E0601": (
"Using variable %r before assignment",
"used-before-assignment",
"Emitted when a local variable is accessed before its assignment took place. "
"Assignments in try blocks are assumed not to have occurred when evaluating "
"associated except/finally blocks. Assignments in except blocks are assumed "
"not to have occurred when evaluating statements outside the block, except "
"when the associated try block contains a return statement.",
),
"E0602": (
"Undefined variable %r",
"undefined-variable",
"Used when an undefined variable is accessed.",
),
"E0603": (
"Undefined variable name %r in __all__",
"undefined-all-variable",
"Used when an undefined variable name is referenced in __all__.",
),
"E0604": (
"Invalid object %r in __all__, must contain only strings",
"invalid-all-object",
"Used when an invalid (non-string) object occurs in __all__.",
),
"E0605": (
"Invalid format for __all__, must be tuple or list",
"invalid-all-format",
"Used when __all__ has an invalid format.",
),
"E0611": (
"No name %r in module %r",
"no-name-in-module",
"Used when a name cannot be found in a module.",
),
"W0601": (
"Global variable %r undefined at the module level",
"global-variable-undefined",
'Used when a variable is defined through the "global" statement '
"but the variable is not defined in the module scope.",
),
"W0602": (
"Using global for %r but no assignment is done",
"global-variable-not-assigned",
'Used when a variable is defined through the "global" statement '
"but no assignment to this variable is done.",
),
"W0603": (
"Using the global statement", # W0121
"global-statement",
'Used when you use the "global" statement to update a global '
"variable. Pylint just try to discourage this "
"usage. That doesn't mean you cannot use it !",
),
"W0604": (
"Using the global statement at the module level", # W0103
"global-at-module-level",
'Used when you use the "global" statement at the module level '
"since it has no effect",
),
"W0611": (
"Unused %s",
"unused-import",
"Used when an imported module or variable is not used.",
),
"W0612": (
"Unused variable %r",
"unused-variable",
"Used when a variable is defined but not used.",
),
"W0613": (
"Unused argument %r",
"unused-argument",
"Used when a function or method argument is not used.",
),
"W0614": (
"Unused import(s) %s from wildcard import of %s",
"unused-wildcard-import",
"Used when an imported module or variable is not used from a "
"`'from X import *'` style import.",
),
"W0621": (
"Redefining name %r from outer scope (line %s)",
"redefined-outer-name",
"Used when a variable's name hides a name defined in the outer scope.",
),
"W0622": (
"Redefining built-in %r",
"redefined-builtin",
"Used when a variable or function override a built-in.",
),
"W0631": (
"Using possibly undefined loop variable %r",
"undefined-loop-variable",
"Used when a loop variable (i.e. defined by a for loop or "
"a list comprehension or a generator expression) is used outside "
"the loop.",
),
"W0632": (
"Possible unbalanced tuple unpacking with "
"sequence%s: "
"left side has %d label(s), right side has %d value(s)",
"unbalanced-tuple-unpacking",
"Used when there is an unbalanced tuple unpacking in assignment",
{"old_names": [("E0632", "old-unbalanced-tuple-unpacking")]},
),
"E0633": (
"Attempting to unpack a non-sequence%s",
"unpacking-non-sequence",
"Used when something which is not "
"a sequence is used in an unpack assignment",
{"old_names": [("W0633", "old-unpacking-non-sequence")]},
),
"W0640": (
"Cell variable %s defined in loop",
"cell-var-from-loop",
"A variable used in a closure is defined in a loop. "
"This will result in all closures using the same value for "
"the closed-over variable.",
),
"W0641": (
"Possibly unused variable %r",
"possibly-unused-variable",
"Used when a variable is defined but might not be used. "
"The possibility comes from the fact that locals() might be used, "
"which could consume or not the said variable",
),
"W0642": (
"Invalid assignment to %s in method",
"self-cls-assignment",
"Invalid assignment to self or cls in instance or class method "
"respectively.",
),
}
class ScopeConsumer(NamedTuple):
"""Store nodes and their consumption states."""
to_consume: Dict[str, List[nodes.NodeNG]]
consumed: Dict[str, List[nodes.NodeNG]]
consumed_uncertain: DefaultDict[str, List[nodes.NodeNG]]
scope_type: str
class NamesConsumer:
"""A simple class to handle consumed, to consume and scope type info of node locals"""
def __init__(self, node, scope_type):
self._atomic = ScopeConsumer(
copy.copy(node.locals), {}, collections.defaultdict(list), scope_type
)
self.node = node
def __repr__(self):
to_consumes = [f"{k}->{v}" for k, v in self._atomic.to_consume.items()]
consumed = [f"{k}->{v}" for k, v in self._atomic.consumed.items()]
consumed_uncertain = [
f"{k}->{v}" for k, v in self._atomic.consumed_uncertain.items()
]
to_consumes = ", ".join(to_consumes)
consumed = ", ".join(consumed)
consumed_uncertain = ", ".join(consumed_uncertain)
return f"""
to_consume : {to_consumes}
consumed : {consumed}
consumed_uncertain: {consumed_uncertain}
scope_type : {self._atomic.scope_type}
"""
def __iter__(self):
return iter(self._atomic)
@property
def to_consume(self):
return self._atomic.to_consume
@property
def consumed(self):
return self._atomic.consumed
@property
def consumed_uncertain(self) -> DefaultDict[str, List[nodes.NodeNG]]:
"""Retrieves nodes filtered out by get_next_to_consume() that may not
have executed, such as statements in except blocks, or statements
in try blocks (when evaluating their corresponding except and finally
blocks). Checkers that want to treat the statements as executed
(e.g. for unused-variable) may need to add them back.
"""
return self._atomic.consumed_uncertain
@property
def scope_type(self):
return self._atomic.scope_type
def mark_as_consumed(self, name, consumed_nodes):
"""Mark the given nodes as consumed for the name.
If all of the nodes for the name were consumed, delete the name from
the to_consume dictionary
"""
unconsumed = [n for n in self.to_consume[name] if n not in set(consumed_nodes)]
self.consumed[name] = consumed_nodes
if unconsumed:
self.to_consume[name] = unconsumed
else:
del self.to_consume[name]
def get_next_to_consume(self, node: nodes.Name) -> Optional[List[nodes.NodeNG]]:
"""Return a list of the nodes that define `node` from this scope. If it is
uncertain whether a node will be consumed, such as for statements in
except blocks, add it to self.consumed_uncertain instead of returning it.
Return None to indicate a special case that needs to be handled by the caller.
"""
name = node.name
parent_node = node.parent
found_nodes = self.to_consume.get(name)
node_statement = node.statement(future=True)
if (
found_nodes
and isinstance(parent_node, nodes.Assign)
and parent_node == found_nodes[0].parent
):
lhs = found_nodes[0].parent.targets[0]
if lhs.name == name: # this name is defined in this very statement
found_nodes = None
if (
found_nodes
and isinstance(parent_node, nodes.For)
and parent_node.iter == node
and parent_node.target in found_nodes
):
found_nodes = None
# Filter out assignments in ExceptHandlers that node is not contained in
if found_nodes:
found_nodes = [
n
for n in found_nodes
if not isinstance(n.statement(future=True), nodes.ExceptHandler)
or n.statement(future=True).parent_of(node)
]
# Filter out assignments in an Except clause that the node is not
# contained in, assuming they may fail
if found_nodes:
uncertain_nodes = self._uncertain_nodes_in_except_blocks(
found_nodes, node, node_statement
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
# If this node is in a Finally block of a Try/Finally,
# filter out assignments in the try portion, assuming they may fail
if found_nodes:
uncertain_nodes = (
self._uncertain_nodes_in_try_blocks_when_evaluating_finally_blocks(
found_nodes, node_statement
)
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
# If this node is in an ExceptHandler,
# filter out assignments in the try portion, assuming they may fail
if found_nodes:
uncertain_nodes = (
self._uncertain_nodes_in_try_blocks_when_evaluating_except_blocks(
found_nodes, node_statement
)
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
return found_nodes
@staticmethod
def _uncertain_nodes_in_except_blocks(
found_nodes: List[nodes.NodeNG],
node: nodes.NodeNG,
node_statement: nodes.Statement,
) -> List[nodes.NodeNG]:
"""Return any nodes in ``found_nodes`` that should be treated as uncertain
because they are in an except block.
"""
uncertain_nodes = []
for other_node in found_nodes:
other_node_statement = other_node.statement(future=True)
# Only testing for statements in the except block of TryExcept
if not (
isinstance(other_node_statement.parent, nodes.ExceptHandler)
and isinstance(other_node_statement.parent.parent, nodes.TryExcept)
):
continue
# If the other node is in the same scope as this node, assume it executes
if other_node_statement.parent.parent_of(node):
continue
try_block_returns = any(
isinstance(try_statement, nodes.Return)
for try_statement in other_node_statement.parent.parent.body
)
# If the try block returns, assume the except blocks execute.
if try_block_returns:
# Exception: if this node is in the final block of the other_node_statement,
# it will execute before returning. Assume the except statements are uncertain.
if (
isinstance(node_statement.parent, nodes.TryFinally)
and node_statement in node_statement.parent.finalbody
# We have already tested that other_node_statement has two parents
# and it was TryExcept, so getting one more parent is safe.
and other_node_statement.parent.parent.parent.parent_of(
node_statement
)
):
uncertain_nodes.append(other_node)
else:
# Assume the except blocks execute. Possibility for a false negative
# if one of the except blocks does not define the name in question,
# raise, or return. See: https://github.com/PyCQA/pylint/issues/5524.
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
@staticmethod
def _uncertain_nodes_in_try_blocks_when_evaluating_except_blocks(
found_nodes: List[nodes.NodeNG], node_statement: nodes.Statement
) -> List[nodes.NodeNG]:
"""Return any nodes in ``found_nodes`` that should be treated as uncertain
because they are in a try block and the ``node_statement`` being evaluated
is in one of its except handlers.
"""
uncertain_nodes: List[nodes.NodeNG] = []
closest_except_handler = utils.get_node_first_ancestor_of_type(
node_statement, nodes.ExceptHandler
)
if closest_except_handler is None:
return uncertain_nodes
for other_node in found_nodes:
other_node_statement = other_node.statement(future=True)
# If the other statement is the except handler guarding `node`, it executes
if other_node_statement is closest_except_handler:
continue
# Ensure other_node is in a try block
(
other_node_try_ancestor,
other_node_try_ancestor_visited_child,
) = utils.get_node_first_ancestor_of_type_and_its_child(
other_node_statement, nodes.TryExcept
)
if other_node_try_ancestor is None:
continue
if (
other_node_try_ancestor_visited_child
not in other_node_try_ancestor.body
):
continue
# Make sure nesting is correct -- there should be at least one
# except handler that is a sibling attached to the try ancestor,
# or is an ancestor of the try ancestor.
if not any(
closest_except_handler in other_node_try_ancestor.handlers
or other_node_try_ancestor_except_handler
in closest_except_handler.node_ancestors()
for other_node_try_ancestor_except_handler in other_node_try_ancestor.handlers
):
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
@staticmethod
def _uncertain_nodes_in_try_blocks_when_evaluating_finally_blocks(
found_nodes: List[nodes.NodeNG], node_statement: nodes.Statement
) -> List[nodes.NodeNG]:
uncertain_nodes: List[nodes.NodeNG] = []
(
closest_try_finally_ancestor,
child_of_closest_try_finally_ancestor,
) = utils.get_node_first_ancestor_of_type_and_its_child(
node_statement, nodes.TryFinally
)
if closest_try_finally_ancestor is None:
return uncertain_nodes
if (
child_of_closest_try_finally_ancestor
not in closest_try_finally_ancestor.finalbody
):
return uncertain_nodes
for other_node in found_nodes:
other_node_statement = other_node.statement(future=True)
(
other_node_try_finally_ancestor,
child_of_other_node_try_finally_ancestor,
) = utils.get_node_first_ancestor_of_type_and_its_child(
other_node_statement, nodes.TryFinally
)
if other_node_try_finally_ancestor is None:
continue
# other_node needs to descend from the try of a try/finally.
if (
child_of_other_node_try_finally_ancestor
not in other_node_try_finally_ancestor.body
):
continue
# If the two try/finally ancestors are not the same, then
# node_statement's closest try/finally ancestor needs to be in
# the final body of other_node's try/finally ancestor, or
# descend from one of the statements in that final body.
if (
other_node_try_finally_ancestor is not closest_try_finally_ancestor
and not any(
other_node_final_statement is closest_try_finally_ancestor
or other_node_final_statement.parent_of(
closest_try_finally_ancestor
)
for other_node_final_statement in other_node_try_finally_ancestor.finalbody
)
):
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
# pylint: disable=too-many-public-methods
class VariablesChecker(BaseChecker):
"""checks for
* unused variables / imports
* undefined variables
* redefinition of variable from builtins or from an outer scope
* use of variable before assignment
* __all__ consistency
* self/cls assignment
"""
__implements__ = IAstroidChecker
name = "variables"
msgs = MSGS
priority = -1
options = (
(
"init-import",
{
"default": 0,
"type": "yn",
"metavar": "<y or n>",
"help": "Tells whether we should check for unused import in "
"__init__ files.",
},
),
(
"dummy-variables-rgx",
{
"default": "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_",
"type": "regexp",
"metavar": "<regexp>",
"help": "A regular expression matching the name of dummy "
"variables (i.e. expected to not be used).",
},
),
(
"additional-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of additional names supposed to be defined in "
"builtins. Remember that you should avoid defining new builtins "
"when possible.",
},
),
(
"callbacks",
{
"default": ("cb_", "_cb"),
"type": "csv",
"metavar": "<callbacks>",
"help": "List of strings which can identify a callback "
"function by name. A callback name must start or "
"end with one of those strings.",
},
),
(
"redefining-builtins-modules",
{
"default": (
"six.moves",
"past.builtins",
"future.builtins",
"builtins",
"io",
),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of qualified module names which can have objects "
"that can redefine builtins.",
},
),
(
"ignored-argument-names",
{
"default": IGNORED_ARGUMENT_NAMES,
"type": "regexp",
"metavar": "<regexp>",
"help": "Argument names that match this expression will be "
"ignored. Default to name with leading underscore.",
},
),
(
"allow-global-unused-variables",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Tells whether unused global variables should be treated as a violation.",
},
),
(
"allowed-redefined-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of names allowed to shadow builtins",
},
),
)
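# For reference, these options map to a pylintrc section roughly like:
#     [VARIABLES]
#     init-import=no
#     dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
#     callbacks=cb_,_cb
#     allow-global-unused-variables=yes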
def __init__(self, linter=None):
super().__init__(linter)
self._to_consume: List[NamesConsumer] = []
self._checking_mod_attr = None
self._loop_variables = []
self._type_annotation_names = []
self._except_handler_names_queue: List[
Tuple[nodes.ExceptHandler, nodes.AssignName]
] = []
"""This is a queue, last in first out"""
self._postponed_evaluation_enabled = False
def open(self) -> None:
"""Called when loading the checker"""
self._is_undefined_variable_enabled = self.linter.is_message_enabled(
"undefined-variable"
)
self._is_used_before_assignment_enabled = self.linter.is_message_enabled(
"used-before-assignment"
)
self._is_undefined_loop_variable_enabled = self.linter.is_message_enabled(
"undefined-loop-variable"
)
@utils.check_messages("redefined-outer-name")
def visit_for(self, node: nodes.For) -> None:
assigned_to = [a.name for a in node.target.nodes_of_class(nodes.AssignName)]
# Only check variables that are used
dummy_rgx = self.config.dummy_variables_rgx
assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
for variable in assigned_to:
for outer_for, outer_variables in self._loop_variables:
if variable in outer_variables and not in_for_else_branch(
outer_for, node
):
self.add_message(
"redefined-outer-name",
args=(variable, outer_for.fromlineno),
node=node,
)
break
self._loop_variables.append((node, assigned_to))
@utils.check_messages("redefined-outer-name")
def leave_for(self, node: nodes.For) -> None:
self._loop_variables.pop()
self._store_type_annotation_names(node)
def visit_module(self, node: nodes.Module) -> None:
"""visit module : update consumption analysis variable
checks globals don't override builtins
"""
self._to_consume = [NamesConsumer(node, "module")]
self._postponed_evaluation_enabled = is_postponed_evaluation_enabled(node)
for name, stmts in node.locals.items():
if utils.is_builtin(name):
if self._should_ignore_redefined_builtin(stmts[0]) or name == "__doc__":
continue
self.add_message("redefined-builtin", args=name, node=stmts[0])
@utils.check_messages(
"unused-import",
"unused-wildcard-import",
"redefined-builtin",
"undefined-all-variable",
"invalid-all-object",
"invalid-all-format",
"unused-variable",
)
def leave_module(self, node: nodes.Module) -> None:
"""leave module: check globals"""
assert len(self._to_consume) == 1
self._check_metaclasses(node)
not_consumed = self._to_consume.pop().to_consume
# attempt to check for __all__ if defined
if "__all__" in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.config.init_import and node.package:
return
self._check_imports(not_consumed)
def visit_classdef(self, node: nodes.ClassDef) -> None:
"""visit class: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "class"))
def leave_classdef(self, _: nodes.ClassDef) -> None:
"""leave class: update consumption analysis variable"""
# do not check for not used locals here (no sense)
self._to_consume.pop()
def visit_lambda(self, node: nodes.Lambda) -> None:
"""visit lambda: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "lambda"))
def leave_lambda(self, _: nodes.Lambda) -> None:
"""leave lambda: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_generatorexp(self, node: nodes.GeneratorExp) -> None:
"""visit genexpr: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_generatorexp(self, _: nodes.GeneratorExp) -> None:
"""leave genexpr: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_dictcomp(self, node: nodes.DictComp) -> None:
"""visit dictcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_dictcomp(self, _: nodes.DictComp) -> None:
"""leave dictcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_setcomp(self, node: nodes.SetComp) -> None:
"""visit setcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_setcomp(self, _: nodes.SetComp) -> None:
"""leave setcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
"""visit function: update consumption analysis variable and check locals"""
self._to_consume.append(NamesConsumer(node, "function"))
if not (
self.linter.is_message_enabled("redefined-outer-name")
or self.linter.is_message_enabled("redefined-builtin")
):
return
globs = node.root().globals
for name, stmt in node.items():
if name in globs and not isinstance(stmt, nodes.Global):
definition = globs[name][0]
if (
isinstance(definition, nodes.ImportFrom)
and definition.modname == FUTURE
):
# It is a __future__ directive, not a symbol.
continue
# Do not take into account redefined names for the purpose
# of type checking.
if any(
isinstance(definition.parent, nodes.If)
and definition.parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
for definition in globs[name]
):
continue
line = definition.fromlineno
if not self._is_name_ignored(stmt, name):
self.add_message(
"redefined-outer-name", args=(name, line), node=stmt
)
elif (
utils.is_builtin(name)
and not self._allowed_redefined_builtin(name)
and not self._should_ignore_redefined_builtin(stmt)
):
# do not print Redefining builtin for additional builtins
self.add_message("redefined-builtin", args=name, node=stmt)
def leave_functiondef(self, node: nodes.FunctionDef) -> None:
"""leave function: check function's locals are consumed"""
self._check_metaclasses(node)
if node.type_comment_returns:
self._store_type_annotation_node(node.type_comment_returns)
if node.type_comment_args:
for argument_annotation in node.type_comment_args:
self._store_type_annotation_node(argument_annotation)
not_consumed = self._to_consume.pop().to_consume
if not (
self.linter.is_message_enabled("unused-variable")
or self.linter.is_message_enabled("possibly-unused-variable")
or self.linter.is_message_enabled("unused-argument")
):
return
# Don't check arguments of function which are only raising an exception.
if utils.is_error(node):
return
# Don't check arguments of abstract methods or within an interface.
is_method = node.is_method()
if is_method and node.is_abstract():
return
global_names = _flattened_scope_names(node.nodes_of_class(nodes.Global))
nonlocal_names = _flattened_scope_names(node.nodes_of_class(nodes.Nonlocal))
for name, stmts in not_consumed.items():
self._check_is_unused(name, node, stmts[0], global_names, nonlocal_names)
visit_asyncfunctiondef = visit_functiondef
leave_asyncfunctiondef = leave_functiondef
@utils.check_messages(
"global-variable-undefined",
"global-variable-not-assigned",
"global-statement",
"global-at-module-level",
"redefined-builtin",
)
def visit_global(self, node: nodes.Global) -> None:
"""check names imported exists in the global scope"""
frame = node.frame(future=True)
if isinstance(frame, nodes.Module):
self.add_message("global-at-module-level", node=node)
return
module = frame.root()
default_message = True
locals_ = node.scope().locals
for name in node.names:
try:
assign_nodes = module.getattr(name)
except astroid.NotFoundError:
# unassigned global, skip
assign_nodes = []
not_defined_locally_by_import = not any(
isinstance(local, nodes.Import) for local in locals_.get(name, ())
)
if (
not utils.is_reassigned_after_current(node, name)
and not utils.is_deleted_after_current(node, name)
and not_defined_locally_by_import
):
self.add_message("global-variable-not-assigned", args=name, node=node)
default_message = False
continue
for anode in assign_nodes:
if (
isinstance(anode, nodes.AssignName)
and anode.name in module.special_attributes
):
self.add_message("redefined-builtin", args=name, node=node)
break
if anode.frame(future=True) is module:
# module level assignment
break
if (
isinstance(anode, (nodes.ClassDef, nodes.FunctionDef))
and anode.parent is module
):
# module level function assignment
break
else:
if not_defined_locally_by_import:
# global undefined at the module scope
self.add_message("global-variable-undefined", args=name, node=node)
default_message = False
if default_message:
self.add_message("global-statement", node=node)
def visit_assignname(self, node: nodes.AssignName) -> None:
if isinstance(node.assign_type(), nodes.AugAssign):
self.visit_name(node)
def visit_delname(self, node: nodes.DelName) -> None:
self.visit_name(node)
def visit_name(self, node: nodes.Name) -> None:
"""Don't add the 'utils.check_messages' decorator here!
It's important that all 'Name' nodes are visited, otherwise the
'NamesConsumers' won't be correct.
"""
stmt = node.statement(future=True)
if stmt.fromlineno is None:
# name node from an astroid built from live code, skip
assert not stmt.root().file.endswith(".py")
return
self._undefined_and_used_before_checker(node, stmt)
if self._is_undefined_loop_variable_enabled:
self._loopvar_name(node)
@utils.check_messages("redefined-outer-name")
def visit_excepthandler(self, node: nodes.ExceptHandler) -> None:
if not node.name or not isinstance(node.name, nodes.AssignName):
return
for outer_except, outer_except_assign_name in self._except_handler_names_queue:
if node.name.name == outer_except_assign_name.name:
self.add_message(
"redefined-outer-name",
args=(outer_except_assign_name.name, outer_except.fromlineno),
node=node,
)
break
self._except_handler_names_queue.append((node, node.name))
@utils.check_messages("redefined-outer-name")
def leave_excepthandler(self, node: nodes.ExceptHandler) -> None:
if not node.name or not isinstance(node.name, nodes.AssignName):
return
self._except_handler_names_queue.pop()
def _undefined_and_used_before_checker(
self, node: nodes.Name, stmt: nodes.NodeNG
) -> None:
frame = stmt.scope()
start_index = len(self._to_consume) - 1
# iterates through parent scopes, from the inner to the outer
base_scope_type = self._to_consume[start_index].scope_type
for i in range(start_index, -1, -1):
current_consumer = self._to_consume[i]
# Certain nodes shouldn't be checked as they get checked another time
if self._should_node_be_skipped(node, current_consumer, i == start_index):
continue
action, found_nodes = self._check_consumer(
node, stmt, frame, current_consumer, i, base_scope_type
)
if action is VariableVisitConsumerAction.CONTINUE:
continue
if action is VariableVisitConsumerAction.CONSUME:
# Any nodes added to consumed_uncertain by get_next_to_consume()
# should be added back so that they are marked as used.
# They will have already had a chance to emit used-before-assignment.
# We check here instead of before every single return in _check_consumer()
found_nodes += current_consumer.consumed_uncertain[node.name] # type: ignore[operator]
current_consumer.mark_as_consumed(node.name, found_nodes)
if action in {
VariableVisitConsumerAction.RETURN,
VariableVisitConsumerAction.CONSUME,
}:
return
# we have not found the name, if it isn't a builtin, that's an
# undefined name !
if (
self._is_undefined_variable_enabled
and not (
node.name in nodes.Module.scope_attrs
or utils.is_builtin(node.name)
or node.name in self.config.additional_builtins
or (
node.name == "__class__"
and isinstance(frame, nodes.FunctionDef)
and frame.is_method()
)
)
and not utils.node_ignores_exception(node, NameError)
):
self.add_message("undefined-variable", args=node.name, node=node)
def _should_node_be_skipped(
self, node: nodes.Name, consumer: NamesConsumer, is_start_index: bool
) -> bool:
"""Tests a consumer and node for various conditions in which the node
shouldn't be checked for the undefined-variable and used-before-assignment checks.
"""
if consumer.scope_type == "class":
# The list of base classes in the class definition is not part
# of the class body.
# If the current scope is a class scope but it's not the inner
# scope, ignore it. This prevents accessing this scope instead of
# the global one in function members when there are common
# names.
if utils.is_ancestor_name(consumer.node, node) or (
not is_start_index and self._ignore_class_scope(node)
):
return True
# Ignore inner class scope for keywords in class definition
if isinstance(node.parent, nodes.Keyword) and isinstance(
node.parent.parent, nodes.ClassDef
):
return True
elif consumer.scope_type == "function" and self._defined_in_function_definition(
node, consumer.node
):
# If the name node is used as a function default argument's value or as
# a decorator, then start from the parent frame of the function instead
# of the function frame - and thus open an inner class scope
return True
elif consumer.scope_type == "lambda" and utils.is_default_argument(
node, consumer.node
):
return True
return False
# pylint: disable=too-many-return-statements
def _check_consumer(
self,
node: nodes.Name,
stmt: nodes.NodeNG,
frame: nodes.LocalsDictNodeNG,
current_consumer: NamesConsumer,
consumer_level: int,
base_scope_type: Any,
) -> Union[
Tuple[
Union[
Literal[VariableVisitConsumerAction.CONTINUE],
Literal[VariableVisitConsumerAction.RETURN],
],
None,
],
Tuple[Literal[VariableVisitConsumerAction.CONSUME], List[nodes.NodeNG]],
]:
"""Checks a consumer for conditions that should trigger messages"""
# If the name has already been consumed, only check it's not a loop
# variable used outside the loop.
# Avoid the case where there are homonyms inside function scope and
# comprehension current scope (avoid bug #1731)
if node.name in current_consumer.consumed:
if utils.is_func_decorator(current_consumer.node) or not (
current_consumer.scope_type == "comprehension"
and self._has_homonym_in_upper_function_scope(node, consumer_level)
# But don't catch homonyms against the filter of a comprehension,
# (like "if x" in "[x for x in expr() if x]")
# https://github.com/PyCQA/pylint/issues/5586
and not (
(
isinstance(node.parent.parent, nodes.Comprehension)
and node.parent in node.parent.parent.ifs
)
# Or homonyms against values to keyword arguments
# (like "var" in "[func(arg=var) for var in expr()]")
or (
isinstance(node.scope(), nodes.ComprehensionScope)
and isinstance(node.parent, (nodes.Call, nodes.Keyword))
)
)
):
self._check_late_binding_closure(node)
self._loopvar_name(node)
return (VariableVisitConsumerAction.RETURN, None)
found_nodes = current_consumer.get_next_to_consume(node)
if found_nodes is None:
return (VariableVisitConsumerAction.CONTINUE, None)
if not found_nodes:
if node.name in current_consumer.consumed_uncertain:
confidence = CONTROL_FLOW
else:
confidence = HIGH
self.add_message(
"used-before-assignment",
args=node.name,
node=node,
confidence=confidence,
)
if current_consumer.consumed_uncertain[node.name]:
# If there are nodes added to consumed_uncertain by
# get_next_to_consume() because they might not have executed,
# return a CONSUME action so that _undefined_and_used_before_checker()
# will mark them as used
return (VariableVisitConsumerAction.CONSUME, found_nodes)
return (VariableVisitConsumerAction.RETURN, None)
self._check_late_binding_closure(node)
if not (
self._is_undefined_variable_enabled
or self._is_used_before_assignment_enabled
):
return (VariableVisitConsumerAction.CONSUME, found_nodes)
defnode = utils.assign_parent(found_nodes[0])
defstmt = defnode.statement(future=True)
defframe = defstmt.frame(future=True)
# The class reuses itself in the class scope.
is_recursive_klass = (
frame is defframe
and defframe.parent_of(node)
and isinstance(defframe, nodes.ClassDef)
and node.name == defframe.name
)
if (
is_recursive_klass
and utils.get_node_first_ancestor_of_type(node, nodes.Lambda)
and (
not utils.is_default_argument(node)
or node.scope().parent.scope() is not defframe
)
):
# Self-referential class references are fine in lambdas --
# As long as they are not part of the default argument directly
# under the scope of the parent self-referring class.
# Example of valid default argument:
# class MyName3:
# myattr = 1
# mylambda3 = lambda: lambda a=MyName3: a
# Example of invalid default argument:
# class MyName4:
# myattr = 1
# mylambda4 = lambda a=MyName4: lambda: a
# If the above conditional is True,
# there is no possibility of undefined-variable
# Also do not consume class name
# (since consuming blocks subsequent checks)
# -- quit
return (VariableVisitConsumerAction.RETURN, None)
(
maybe_before_assign,
annotation_return,
use_outer_definition,
) = self._is_variable_violation(
node,
defnode,
stmt,
defstmt,
frame,
defframe,
base_scope_type,
is_recursive_klass,
)
if use_outer_definition:
return (VariableVisitConsumerAction.CONTINUE, None)
if (
maybe_before_assign
and not utils.is_defined_before(node)
and not astroid.are_exclusive(stmt, defstmt, ("NameError",))
):
# Used and defined in the same place, e.g `x += 1` and `del x`
defined_by_stmt = defstmt is stmt and isinstance(
node, (nodes.DelName, nodes.AssignName)
)
if (
is_recursive_klass
or defined_by_stmt
or annotation_return
or isinstance(defstmt, nodes.Delete)
):
if not utils.node_ignores_exception(node, NameError):
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(
stmt,
(
nodes.AnnAssign,
nodes.FunctionDef,
nodes.Arguments,
),
)
and node.name in node.root().locals
):
self.add_message(
"undefined-variable", args=node.name, node=node
)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif base_scope_type != "lambda":
# E0601 may *not* occur in lambda scope.
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(stmt, (nodes.AnnAssign, nodes.FunctionDef))
):
self.add_message(
"used-before-assignment",
args=node.name,
node=node,
confidence=HIGH,
)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif base_scope_type == "lambda":
# E0601 can occur in class-level scope in lambdas, as in
# the following example:
# class A:
# x = lambda attr: f + attr
# f = 42
# We check lineno because doing the following is fine:
# class A:
# x = 42
# y = lambda attr: x + attr
if (
isinstance(frame, nodes.ClassDef)
and node.name in frame.locals
and stmt.fromlineno <= defstmt.fromlineno
):
self.add_message(
"used-before-assignment",
args=node.name,
node=node,
confidence=HIGH,
)
elif self._is_only_type_assignment(node, defstmt):
if node.scope().locals.get(node.name):
self.add_message(
"used-before-assignment", args=node.name, node=node, confidence=HIGH
)
else:
self.add_message(
"undefined-variable", args=node.name, node=node, confidence=HIGH
)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif isinstance(defstmt, nodes.ClassDef):
is_first_level_ref = self._is_first_level_self_reference(node, defstmt)
if is_first_level_ref == 2:
self.add_message(
"used-before-assignment", node=node, args=node.name, confidence=HIGH
)
if is_first_level_ref:
return (VariableVisitConsumerAction.RETURN, None)
elif isinstance(defnode, nodes.NamedExpr):
if isinstance(defnode.parent, nodes.IfExp):
if self._is_never_evaluated(defnode, defnode.parent):
self.add_message(
"undefined-variable",
args=node.name,
node=node,
confidence=INFERENCE,
)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
@utils.check_messages("no-name-in-module")
def visit_import(self, node: nodes.Import) -> None:
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
if utils.is_node_in_guarded_import_block(node) is True:
# Don't verify import if part of guarded import block
# I.e. `sys.version_info` or `typing.TYPE_CHECKING`
return
for name, _ in node.names:
parts = name.split(".")
try:
module = next(_infer_name_module(node, parts[0]))
except astroid.ResolveError:
continue
if not isinstance(module, nodes.Module):
continue
self._check_module_attrs(node, module, parts[1:])
@utils.check_messages("no-name-in-module")
def visit_importfrom(self, node: nodes.ImportFrom) -> None:
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
if utils.is_node_in_guarded_import_block(node) is True:
# Don't verify import if part of guarded import block
# I.e. `sys.version_info` or `typing.TYPE_CHECKING`
return
name_parts = node.modname.split(".")
try:
module = node.do_import_module(name_parts[0])
except astroid.AstroidBuildingException:
return
module = self._check_module_attrs(node, module, name_parts[1:])
if not module:
return
for name, _ in node.names:
if name == "*":
continue
self._check_module_attrs(node, module, name.split("."))
@utils.check_messages(
"unbalanced-tuple-unpacking", "unpacking-non-sequence", "self-cls-assignment"
)
def visit_assign(self, node: nodes.Assign) -> None:
"""Check unbalanced tuple unpacking for assignments
and unpacking non-sequences as well as in case self/cls
get assigned.
"""
self._check_self_cls_assign(node)
if not isinstance(node.targets[0], (nodes.Tuple, nodes.List)):
return
targets = node.targets[0].itered()
try:
inferred = utils.safe_infer(node.value)
if inferred is not None:
self._check_unpacking(inferred, node, targets)
except astroid.InferenceError:
return
# listcomp have now also their scope
def visit_listcomp(self, node: nodes.ListComp) -> None:
"""visit dictcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_listcomp(self, _: nodes.ListComp) -> None:
"""leave dictcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def leave_assign(self, node: nodes.Assign) -> None:
self._store_type_annotation_names(node)
def leave_with(self, node: nodes.With) -> None:
self._store_type_annotation_names(node)
def visit_arguments(self, node: nodes.Arguments) -> None:
for annotation in node.type_comment_args:
self._store_type_annotation_node(annotation)
# Relying on other checker's options, which might not have been initialized yet.
@astroid.decorators.cachedproperty
def _analyse_fallback_blocks(self):
return get_global_option(self, "analyse-fallback-blocks", default=False)
@astroid.decorators.cachedproperty
def _ignored_modules(self):
return get_global_option(self, "ignored-modules", default=[])
@astroid.decorators.cachedproperty
def _allow_global_unused_variables(self):
return get_global_option(self, "allow-global-unused-variables", default=True)
@staticmethod
def _defined_in_function_definition(node, frame):
in_annotation_or_default_or_decorator = False
if (
isinstance(frame, nodes.FunctionDef)
and node.statement(future=True) is frame
):
in_annotation_or_default_or_decorator = (
(
node in frame.args.annotations
or node in frame.args.posonlyargs_annotations
or node in frame.args.kwonlyargs_annotations
or node is frame.args.varargannotation
or node is frame.args.kwargannotation
)
or frame.args.parent_of(node)
or (frame.decorators and frame.decorators.parent_of(node))
or (
frame.returns
and (node is frame.returns or frame.returns.parent_of(node))
)
)
return in_annotation_or_default_or_decorator
@staticmethod
def _in_lambda_or_comprehension_body(
node: nodes.NodeNG, frame: nodes.NodeNG
) -> bool:
"""return True if node within a lambda/comprehension body (or similar) and thus should not have access to class attributes in frame"""
child = node
parent = node.parent
while parent is not None:
if parent is frame:
return False
if isinstance(parent, nodes.Lambda) and child is not parent.args:
# Body of lambda should not have access to class attributes.
return True
if isinstance(parent, nodes.Comprehension) and child is not parent.iter:
# Only iter of list/set/dict/generator comprehension should have access.
return True
if isinstance(parent, nodes.ComprehensionScope) and not (
parent.generators and child is parent.generators[0]
):
# Body of list/set/dict/generator comprehension should not have access to class attributes.
# Furthermore, only the first generator (if multiple) in comprehension should have access.
return True
child = parent
parent = parent.parent
return False
@staticmethod
def _is_variable_violation(
node: nodes.Name,
defnode,
stmt: nodes.Statement,
defstmt: nodes.Statement,
frame, # scope of statement of node
defframe,
base_scope_type,
is_recursive_klass,
) -> Tuple[bool, bool, bool]:
# pylint: disable=too-many-nested-blocks
maybe_before_assign = True
annotation_return = False
use_outer_definition = False
if frame is not defframe:
maybe_before_assign = _detect_global_scope(node, frame, defframe)
elif defframe.parent is None:
# we are at the module level, check the name is not
# defined in builtins
if (
node.name in defframe.scope_attrs
or astroid.builtin_lookup(node.name)[1]
):
maybe_before_assign = False
else:
# we are in a local scope, check the name is not
# defined in global or builtin scope
# skip this lookup if name is assigned later in function scope/lambda
# Note: the node.frame() is not the same as the `frame` argument which is
# equivalent to frame.statement().scope()
forbid_lookup = (
isinstance(frame, nodes.FunctionDef)
or isinstance(node.frame(future=True), nodes.Lambda)
) and _assigned_locally(node)
if not forbid_lookup and defframe.root().lookup(node.name)[1]:
maybe_before_assign = False
use_outer_definition = stmt == defstmt and not isinstance(
defnode, nodes.Comprehension
)
# check if we have a nonlocal
elif node.name in defframe.locals:
maybe_before_assign = not any(
isinstance(child, nodes.Nonlocal) and node.name in child.names
for child in defframe.get_children()
)
if (
base_scope_type == "lambda"
and isinstance(frame, nodes.ClassDef)
and node.name in frame.locals
):
# This rule verifies that the definition node of the
# checked name is an Arguments node, that the name
# is used as a default value in the arguments' defaults,
# and that the actual definition of the variable
# happens before the Arguments definition.
#
# bar = None
# foo = lambda bar=bar: bar
#
# In this case, maybe_before_assign should be False, otherwise
# it should be True.
maybe_before_assign = not (
isinstance(defnode, nodes.Arguments)
and node in defnode.defaults
and frame.locals[node.name][0].fromlineno < defstmt.fromlineno
)
elif isinstance(defframe, nodes.ClassDef) and isinstance(
frame, nodes.FunctionDef
):
# Special rule for function return annotations,
# using a name defined earlier in the class containing the function.
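# For example, no warning is expected for:
#     class A:
#         Alias = int
#         def method(self) -> Alias:
#             ...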
if node is frame.returns and defframe.parent_of(frame.returns):
annotation_return = True
if (
frame.returns.name in defframe.locals
and defframe.locals[node.name][0].lineno < frame.lineno
):
# Detect class assignments with a name defined earlier in the
# class. In this case, no warning should be raised.
maybe_before_assign = False
else:
maybe_before_assign = True
if isinstance(node.parent, nodes.Arguments):
maybe_before_assign = stmt.fromlineno <= defstmt.fromlineno
elif is_recursive_klass:
maybe_before_assign = True
else:
maybe_before_assign = (
maybe_before_assign and stmt.fromlineno <= defstmt.fromlineno
)
if maybe_before_assign and stmt.fromlineno == defstmt.fromlineno:
if (
isinstance(defframe, nodes.FunctionDef)
and frame is defframe
and defframe.parent_of(node)
and stmt is not defstmt
):
# Single statement function, with the statement on the
# same line as the function definition
maybe_before_assign = False
elif (
isinstance( # pylint: disable=too-many-boolean-expressions
defstmt,
(
nodes.Assign,
nodes.AnnAssign,
nodes.AugAssign,
nodes.Expr,
nodes.Return,
),
)
and (
isinstance(defstmt.value, nodes.IfExp)
or isinstance(defstmt.value, nodes.Lambda)
and isinstance(defstmt.value.body, nodes.IfExp)
)
and frame is defframe
and defframe.parent_of(node)
and stmt is defstmt
):
# Single statement if, with assignment expression on same
# line as assignment
# x = b if (b := True) else False
maybe_before_assign = False
elif (
isinstance( # pylint: disable=too-many-boolean-expressions
defnode, nodes.NamedExpr
)
and frame is defframe
and defframe.parent_of(stmt)
and stmt is defstmt
and (
(
defnode.lineno == node.lineno
and defnode.col_offset < node.col_offset
)
or (defnode.lineno < node.lineno)
or (
# Issue in the `ast` module until py39
# Nodes in a multiline string have the same lineno
# Could be false-positive without check
not PY39_PLUS
and defnode.lineno == node.lineno
and isinstance(
defstmt,
(
nodes.Assign,
nodes.AnnAssign,
nodes.AugAssign,
nodes.Return,
),
)
and isinstance(defstmt.value, nodes.JoinedStr)
)
)
):
# Expressions, with assignment expressions
# Use only after assignment
# b = (c := 2) and c
maybe_before_assign = False
# Look for type checking definitions inside a type checking guard.
if isinstance(defstmt, (nodes.Import, nodes.ImportFrom)):
defstmt_parent = defstmt.parent
if (
isinstance(defstmt_parent, nodes.If)
and defstmt_parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
):
# Exempt those definitions that are used inside the type checking
# guard or that are defined in both type checking guard branches.
used_in_branch = defstmt_parent.parent_of(node)
defined_in_or_else = False
for definition in defstmt_parent.orelse:
if isinstance(definition, nodes.Assign):
defined_in_or_else = any(
target.name == node.name
for target in definition.targets
if isinstance(target, nodes.AssignName)
)
if defined_in_or_else:
break
if not used_in_branch and not defined_in_or_else:
maybe_before_assign = True
return maybe_before_assign, annotation_return, use_outer_definition
@staticmethod
def _is_only_type_assignment(node: nodes.Name, defstmt: nodes.Statement) -> bool:
"""Check if variable only gets assigned a type and never a value"""
if not isinstance(defstmt, nodes.AnnAssign) or defstmt.value:
return False
defstmt_frame = defstmt.frame(future=True)
node_frame = node.frame(future=True)
parent = node
while parent is not defstmt_frame.parent:
parent_scope = parent.scope()
local_refs = parent_scope.locals.get(node.name, [])
for ref_node in local_refs:
# If local ref is in the same frame as our node, but on a later lineno
# we don't actually care about this local ref.
# Local refs are ordered, so we break.
# print(var)
# var = 1 # <- irrelevant
if defstmt_frame == node_frame and ref_node.lineno > node.lineno:
break
# If the parent of the local reference is anything but an AnnAssign
# Or if the AnnAssign adds a value the variable will now have a value
# var = 1 # OR
# var: int = 1
if (
not isinstance(ref_node.parent, nodes.AnnAssign)
or ref_node.parent.value
):
return False
parent = parent_scope.parent
return True
@staticmethod
def _is_first_level_self_reference(
node: nodes.Name, defstmt: nodes.ClassDef
) -> Literal[0, 1, 2]:
"""Check if a first level method's annotation or default values
refers to its own class.
Return values correspond to:
0 = Continue
1 = Break
2 = Break + emit message
"""
if node.frame(future=True).parent == defstmt and node.statement(
future=True
) == node.frame(future=True):
# Check if used as type annotation
# Break but don't emit message if postponed evaluation is enabled
if utils.is_node_in_type_annotation_context(node):
if not utils.is_postponed_evaluation_enabled(node):
return 2
return 1
# Check if used as default value by calling the class
if isinstance(node.parent, nodes.Call) and isinstance(
node.parent.parent, nodes.Arguments
):
return 2
return 0
@staticmethod
def _is_never_evaluated(
defnode: nodes.NamedExpr, defnode_parent: nodes.IfExp
) -> bool:
"""Check if a NamedExpr is inside a side of if ... else that never
gets evaluated
"""
inferred_test = utils.safe_infer(defnode_parent.test)
if isinstance(inferred_test, nodes.Const):
if inferred_test.value is True and defnode == defnode_parent.orelse:
return True
if inferred_test.value is False and defnode == defnode_parent.body:
return True
return False
def _ignore_class_scope(self, node):
"""Return True if the node is in a local class scope, as an assignment.
:param node: Node considered
:type node: astroid.Node
:return: True if the node is in a local class scope, as an assignment. False otherwise.
:rtype: bool
"""
# Detect if we are in a local class scope, as an assignment.
# For example, the following is fair game.
#
# class A:
# b = 1
# c = lambda b=b: b * b
#
# class B:
# tp = 1
# def func(self, arg: tp):
# ...
# class C:
# tp = 2
# def func(self, arg=tp):
# ...
# class C:
# class Tp:
# pass
# class D(Tp):
# ...
name = node.name
frame = node.statement(future=True).scope()
in_annotation_or_default_or_decorator = self._defined_in_function_definition(
node, frame
)
in_ancestor_list = utils.is_ancestor_name(frame, node)
if in_annotation_or_default_or_decorator or in_ancestor_list:
frame_locals = frame.parent.scope().locals
else:
frame_locals = frame.locals
return not (
(isinstance(frame, nodes.ClassDef) or in_annotation_or_default_or_decorator)
and not self._in_lambda_or_comprehension_body(node, frame)
and name in frame_locals
)
def _loopvar_name(self, node: astroid.Name) -> None:
# filter variables according to node's scope
astmts = [s for s in node.lookup(node.name)[1] if hasattr(s, "assign_type")]
# If this variable usage exists inside a function definition
# that exists in the same loop,
# the usage is safe because the function will not be defined either if
# the variable is not defined.
scope = node.scope()
if isinstance(scope, nodes.FunctionDef) and any(
asmt.scope().parent_of(scope) for asmt in astmts
):
return
# Filter variables according to their respective scope. Test parent
# and statement to avoid #74747. This is not a total fix, which would
# introduce a mechanism similar to special attribute lookup in
# modules. Also, in order to get correct inference in this case, the
# scope lookup rules would need to be changed to return the initial
# assignment (which does not exist in code per se) as well as any later
# modifications.
# pylint: disable-next=too-many-boolean-expressions
if (
not astmts
or (
astmts[0].parent == astmts[0].root()
and astmts[0].parent.parent_of(node)
)
or (
astmts[0].is_statement
or not isinstance(astmts[0].parent, nodes.Module)
and astmts[0].statement(future=True).parent_of(node)
)
):
_astmts = []
else:
_astmts = astmts[:1]
for i, stmt in enumerate(astmts[1:]):
if astmts[i].statement(future=True).parent_of(
stmt
) and not in_for_else_branch(astmts[i].statement(future=True), stmt):
continue
_astmts.append(stmt)
astmts = _astmts
if len(astmts) != 1:
return
assign = astmts[0].assign_type()
if not (
isinstance(assign, (nodes.For, nodes.Comprehension, nodes.GeneratorExp))
and assign.statement(future=True) is not node.statement(future=True)
):
return
# For functions we can do more by inferring the length of the itered object
if not isinstance(assign, nodes.For):
self.add_message("undefined-loop-variable", args=node.name, node=node)
return
try:
inferred = next(assign.iter.infer())
except astroid.InferenceError:
self.add_message("undefined-loop-variable", args=node.name, node=node)
else:
if (
isinstance(inferred, astroid.Instance)
and inferred.qname() == BUILTIN_RANGE
):
# Consider range() objects safe, even if they might not yield any results.
return
# Consider sequences.
sequences = (
nodes.List,
nodes.Tuple,
nodes.Dict,
nodes.Set,
astroid.objects.FrozenSet,
)
if not isinstance(inferred, sequences):
self.add_message("undefined-loop-variable", args=node.name, node=node)
return
elements = getattr(inferred, "elts", getattr(inferred, "items", []))
if not elements:
self.add_message("undefined-loop-variable", args=node.name, node=node)
def _check_is_unused(self, name, node, stmt, global_names, nonlocal_names):
# Ignore some special names specified by user configuration.
if self._is_name_ignored(stmt, name):
return
# Ignore names that were added dynamically to the Function scope
if (
isinstance(node, nodes.FunctionDef)
and name == "__class__"
and len(node.locals["__class__"]) == 1
and isinstance(node.locals["__class__"][0], nodes.ClassDef)
):
return
# Ignore names imported by the global statement.
if isinstance(stmt, (nodes.Global, nodes.Import, nodes.ImportFrom)):
# Detect imports, assigned to global statements.
if global_names and _import_name_is_global(stmt, global_names):
return
argnames = list(
itertools.chain(node.argnames(), [arg.name for arg in node.args.kwonlyargs])
)
# Care about functions with unknown argument (builtins)
if name in argnames:
self._check_unused_arguments(name, node, stmt, argnames)
else:
if stmt.parent and isinstance(
stmt.parent, (nodes.Assign, nodes.AnnAssign, nodes.Tuple)
):
if name in nonlocal_names:
return
qname = asname = None
if isinstance(stmt, (nodes.Import, nodes.ImportFrom)):
# Need the complete name, which we don't have in .locals.
if len(stmt.names) > 1:
import_names = next(
(names for names in stmt.names if name in names), None
)
else:
import_names = stmt.names[0]
if import_names:
qname, asname = import_names
name = asname or qname
if _has_locals_call_after_node(stmt, node.scope()):
message_name = "possibly-unused-variable"
else:
if isinstance(stmt, nodes.Import):
if asname is not None:
msg = f"{qname} imported as {asname}"
else:
msg = f"import {name}"
self.add_message("unused-import", args=msg, node=stmt)
return
if isinstance(stmt, nodes.ImportFrom):
if asname is not None:
msg = f"{qname} imported from {stmt.modname} as {asname}"
else:
msg = f"{name} imported from {stmt.modname}"
self.add_message("unused-import", args=msg, node=stmt)
return
message_name = "unused-variable"
if isinstance(stmt, nodes.FunctionDef) and stmt.decorators:
return
# Don't check function stubs created only for type information
if utils.is_overload_stub(node):
return
# Special case for exception variable
if isinstance(stmt.parent, nodes.ExceptHandler) and any(
n.name == name for n in stmt.parent.nodes_of_class(nodes.Name)
):
return
self.add_message(message_name, args=name, node=stmt)
def _is_name_ignored(self, stmt, name):
authorized_rgx = self.config.dummy_variables_rgx
if (
isinstance(stmt, nodes.AssignName)
and isinstance(stmt.parent, nodes.Arguments)
or isinstance(stmt, nodes.Arguments)
):
regex = self.config.ignored_argument_names
else:
regex = authorized_rgx
return regex and regex.match(name)
def _check_unused_arguments(self, name, node, stmt, argnames):
is_method = node.is_method()
klass = node.parent.frame(future=True)
if is_method and isinstance(klass, nodes.ClassDef):
confidence = (
INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
)
else:
confidence = HIGH
if is_method:
# Don't warn for the first argument of a (non static) method
if node.type != "staticmethod" and name == argnames[0]:
return
# Don't warn for argument of an overridden method
overridden = overridden_method(klass, node.name)
if overridden is not None and name in overridden.argnames():
return
if node.name in utils.PYMETHODS and node.name not in (
"__init__",
"__new__",
):
return
# Don't check callback arguments
if any(
node.name.startswith(cb) or node.name.endswith(cb)
for cb in self.config.callbacks
):
return
# Don't check arguments of singledispatch.register function.
if utils.is_registered_in_singledispatch_function(node):
return
# Don't check function stubs created only for type information
if utils.is_overload_stub(node):
return
# Don't check protocol classes
if utils.is_protocol_class(klass):
return
self.add_message("unused-argument", args=name, node=stmt, confidence=confidence)
def _check_late_binding_closure(self, node: nodes.Name) -> None:
"""Check whether node is a cell var that is assigned within a containing loop.
Special cases where we don't care about the error:
1. When the node's function is immediately called, e.g. (lambda: i)()
2. When the node's function is returned from within the loop, e.g. return lambda: i
"""
if not self.linter.is_message_enabled("cell-var-from-loop"):
return
node_scope = node.frame(future=True)
# If node appears in a default argument expression,
# look at the next enclosing frame instead
if utils.is_default_argument(node, node_scope):
node_scope = node_scope.parent.frame(future=True)
# Check if node is a cell var
if (
not isinstance(node_scope, (nodes.Lambda, nodes.FunctionDef))
or node.name in node_scope.locals
):
return
assign_scope, stmts = node.lookup(node.name)
if not stmts or not assign_scope.parent_of(node_scope):
return
if utils.is_comprehension(assign_scope):
self.add_message("cell-var-from-loop", node=node, args=node.name)
else:
# Look for an enclosing For loop.
# Currently, we only consider the first assignment
assignment_node = stmts[0]
maybe_for = assignment_node
while maybe_for and not isinstance(maybe_for, nodes.For):
if maybe_for is assign_scope:
break
maybe_for = maybe_for.parent
else:
if (
maybe_for
and maybe_for.parent_of(node_scope)
and not utils.is_being_called(node_scope)
and node_scope.parent
and not isinstance(node_scope.statement(future=True), nodes.Return)
):
self.add_message("cell-var-from-loop", node=node, args=node.name)
def _should_ignore_redefined_builtin(self, stmt):
if not isinstance(stmt, nodes.ImportFrom):
return False
return stmt.modname in self.config.redefining_builtins_modules
def _allowed_redefined_builtin(self, name):
return name in self.config.allowed_redefined_builtins
def _has_homonym_in_upper_function_scope(
self, node: nodes.Name, index: int
) -> bool:
"""Return whether there is a node with the same name in the
to_consume dict of an upper scope and if that scope is a
function
:param node: node to check for
:param index: index of the current consumer inside self._to_consume
:return: True if there is a node with the same name in the
to_consume dict of an upper scope and if that scope
is a function, False otherwise
"""
return any(
_consumer.scope_type == "function" and node.name in _consumer.to_consume
for _consumer in self._to_consume[index - 1 :: -1]
)
def _store_type_annotation_node(self, type_annotation):
"""Given a type annotation, store all the name nodes it refers to"""
if isinstance(type_annotation, nodes.Name):
self._type_annotation_names.append(type_annotation.name)
return
if isinstance(type_annotation, nodes.Attribute):
self._store_type_annotation_node(type_annotation.expr)
return
if not isinstance(type_annotation, nodes.Subscript):
return
if (
isinstance(type_annotation.value, nodes.Attribute)
and isinstance(type_annotation.value.expr, nodes.Name)
and type_annotation.value.expr.name == TYPING_MODULE
):
self._type_annotation_names.append(TYPING_MODULE)
return
self._type_annotation_names.extend(
annotation.name for annotation in type_annotation.nodes_of_class(nodes.Name)
)
def _store_type_annotation_names(self, node):
type_annotation = node.type_annotation
if not type_annotation:
return
self._store_type_annotation_node(node.type_annotation)
def _check_self_cls_assign(self, node: nodes.Assign) -> None:
"""Check that self/cls don't get assigned"""
assign_names: Set[Optional[str]] = set()
for target in node.targets:
if isinstance(target, nodes.AssignName):
assign_names.add(target.name)
elif isinstance(target, nodes.Tuple):
assign_names.update(
elt.name for elt in target.elts if isinstance(elt, nodes.AssignName)
)
scope = node.scope()
nonlocals_with_same_name = any(
child for child in scope.body if isinstance(child, nodes.Nonlocal)
)
if nonlocals_with_same_name:
scope = node.scope().parent.scope()
if not (
isinstance(scope, nodes.FunctionDef)
and scope.is_method()
and "builtins.staticmethod" not in scope.decoratornames()
):
return
argument_names = scope.argnames()
if not argument_names:
return
self_cls_name = argument_names[0]
if self_cls_name in assign_names:
self.add_message("self-cls-assignment", node=node, args=(self_cls_name,))
def _check_unpacking(self, inferred, node, targets):
"""Check for unbalanced tuple unpacking
and unpacking of non-sequences.
"""
if utils.is_inside_abstract_class(node):
return
if utils.is_comprehension(node):
return
if inferred is astroid.Uninferable:
return
if (
isinstance(inferred.parent, nodes.Arguments)
and isinstance(node.value, nodes.Name)
and node.value.name == inferred.parent.vararg
):
# Variable-length argument, we can't determine the length.
return
# Attempt to check unpacking is properly balanced
values: Optional[List] = None
if isinstance(inferred, (nodes.Tuple, nodes.List)):
values = inferred.itered()
elif isinstance(inferred, astroid.Instance) and any(
ancestor.qname() == "typing.NamedTuple" for ancestor in inferred.ancestors()
):
values = [i for i in inferred.values() if isinstance(i, nodes.AssignName)]
if values:
if len(targets) != len(values):
# Check if we have starred nodes.
if any(isinstance(target, nodes.Starred) for target in targets):
return
self.add_message(
"unbalanced-tuple-unpacking",
node=node,
args=(
_get_unpacking_extra_info(node, inferred),
len(targets),
len(values),
),
)
# attempt to check unpacking may be possible (ie RHS is iterable)
elif not utils.is_iterable(inferred):
self.add_message(
"unpacking-non-sequence",
node=node,
args=(_get_unpacking_extra_info(node, inferred),),
)
def _check_module_attrs(self, node, module, module_names):
"""check that module_names (list of string) are accessible through the
given module
if the latest access name corresponds to a module, return it
"""
while module_names:
name = module_names.pop(0)
if name == "__dict__":
module = None
break
try:
module = next(module.getattr(name)[0].infer())
if module is astroid.Uninferable:
return None
except astroid.NotFoundError:
if module.name in self._ignored_modules:
return None
self.add_message(
"no-name-in-module", args=(name, module.name), node=node
)
return None
except astroid.InferenceError:
return None
if module_names:
modname = module.name if module else "__dict__"
self.add_message(
"no-name-in-module", node=node, args=(".".join(module_names), modname)
)
return None
if isinstance(module, nodes.Module):
return module
return None
def _check_all(self, node: nodes.Module, not_consumed):
assigned = next(node.igetattr("__all__"))
if assigned is astroid.Uninferable:
return
if not assigned.pytype() in {"builtins.list", "builtins.tuple"}:
line, col = assigned.tolineno, assigned.col_offset
self.add_message("invalid-all-format", line=line, col_offset=col, node=node)
return
for elt in getattr(assigned, "elts", ()):
try:
elt_name = next(elt.infer())
except astroid.InferenceError:
continue
if elt_name is astroid.Uninferable:
continue
if not elt_name.parent:
continue
if not isinstance(elt_name, nodes.Const) or not isinstance(
elt_name.value, str
):
self.add_message("invalid-all-object", args=elt.as_string(), node=elt)
continue
elt_name = elt_name.value
# If elt is in not_consumed, remove it from not_consumed
if elt_name in not_consumed:
del not_consumed[elt_name]
continue
if elt_name not in node.locals:
if not node.package:
self.add_message(
"undefined-all-variable", args=(elt_name,), node=elt
)
else:
basename = os.path.splitext(node.file)[0]
if os.path.basename(basename) == "__init__":
name = node.name + "." + elt_name
try:
astroid.modutils.file_from_modpath(name.split("."))
except ImportError:
self.add_message(
"undefined-all-variable", args=(elt_name,), node=elt
)
except SyntaxError:
# don't yield a syntax-error warning,
# because it will be yielded later
# when the file is checked
pass
def _check_globals(self, not_consumed):
if self._allow_global_unused_variables:
return
for name, node_lst in not_consumed.items():
for node in node_lst:
self.add_message("unused-variable", args=(name,), node=node)
def _check_imports(self, not_consumed):
local_names = _fix_dot_imports(not_consumed)
checked = set()
unused_wildcard_imports: DefaultDict[
Tuple[str, nodes.ImportFrom], List[str]
] = collections.defaultdict(list)
for name, stmt in local_names:
for imports in stmt.names:
real_name = imported_name = imports[0]
if imported_name == "*":
real_name = name
as_name = imports[1]
if real_name in checked:
continue
if name not in (real_name, as_name):
continue
checked.add(real_name)
is_type_annotation_import = (
imported_name in self._type_annotation_names
or as_name in self._type_annotation_names
)
if isinstance(stmt, nodes.Import) or (
isinstance(stmt, nodes.ImportFrom) and not stmt.modname
):
if isinstance(stmt, nodes.ImportFrom) and SPECIAL_OBJ.search(
imported_name
):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if is_type_annotation_import:
# Most likely a typing import if it wasn't used so far.
continue
if as_name == "_":
continue
if as_name is None:
msg = f"import {imported_name}"
else:
msg = f"{imported_name} imported as {as_name}"
if not _is_type_checking_import(stmt):
self.add_message("unused-import", args=msg, node=stmt)
elif isinstance(stmt, nodes.ImportFrom) and stmt.modname != FUTURE:
if SPECIAL_OBJ.search(imported_name):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if _is_from_future_import(stmt, name):
# Check if the name is in fact loaded from a
# __future__ import in another module.
continue
if is_type_annotation_import:
# Most likely a typing import if it wasn't used so far.
continue
if imported_name == "*":
unused_wildcard_imports[(stmt.modname, stmt)].append(name)
else:
if as_name is None:
msg = f"{imported_name} imported from {stmt.modname}"
else:
msg = f"{imported_name} imported from {stmt.modname} as {as_name}"
if not _is_type_checking_import(stmt):
self.add_message("unused-import", args=msg, node=stmt)
# Construct string for unused-wildcard-import message
for module, unused_list in unused_wildcard_imports.items():
if len(unused_list) == 1:
arg_string = unused_list[0]
else:
arg_string = (
f"{', '.join(i for i in unused_list[:-1])} and {unused_list[-1]}"
)
self.add_message(
"unused-wildcard-import", args=(arg_string, module[0]), node=module[1]
)
del self._to_consume
def _check_metaclasses(self, node):
"""Update consumption analysis for metaclasses."""
consumed = [] # [(scope_locals, consumed_key)]
for child_node in node.get_children():
if isinstance(child_node, nodes.ClassDef):
consumed.extend(self._check_classdef_metaclasses(child_node, node))
# Pop the consumed items, in order to avoid having
# unused-import and unused-variable false positives
for scope_locals, name in consumed:
scope_locals.pop(name, None)
def _check_classdef_metaclasses(self, klass, parent_node):
if not klass._metaclass:
# Skip if this class doesn't explicitly use a metaclass, but inherits it from its ancestors
return []
consumed = [] # [(scope_locals, consumed_key)]
metaclass = klass.metaclass()
name = None
if isinstance(klass._metaclass, nodes.Name):
name = klass._metaclass.name
elif isinstance(klass._metaclass, nodes.Attribute) and klass._metaclass.expr:
attr = klass._metaclass.expr
while not isinstance(attr, nodes.Name):
attr = attr.expr
name = attr.name
elif metaclass:
name = metaclass.root().name
found = False
name = METACLASS_NAME_TRANSFORMS.get(name, name)
if name:
# check enclosing scopes starting from most local
for scope_locals, _, _, _ in self._to_consume[::-1]:
found_nodes = scope_locals.get(name, [])
for found_node in found_nodes:
if found_node.lineno <= klass.lineno:
consumed.append((scope_locals, name))
found = True
break
# Check parent scope
nodes_in_parent_scope = parent_node.locals.get(name, [])
for found_node_parent in nodes_in_parent_scope:
if found_node_parent.lineno <= klass.lineno:
found = True
break
if (
not found
and not metaclass
and not (
name in nodes.Module.scope_attrs
or utils.is_builtin(name)
or name in self.config.additional_builtins
)
):
self.add_message("undefined-variable", node=klass, args=(name,))
return consumed
def register(linter: "PyLinter") -> None:
linter.register_checker(VariablesChecker(linter))
| 1 | 20,529 | This is out of date right? Now that `_is_first_level_self_reference` uses it as well? Perhaps we should make the docstring more general and not refer to method names that could change. | PyCQA-pylint | py |
@@ -305,6 +305,7 @@ func createK8sAppMonitoring(
}
func testPromRemoteWriteWithTLS(t *testing.T) {
+ t.Parallel()
// can't extend the names since ns cannot be created with more than 63 characters
tests := []testFramework.PromRemoteWriteTestConfig{
// working configurations | 1 | // Copyright 2016 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"time"
certutil "k8s.io/client-go/util/cert"
appsv1 "k8s.io/api/apps/v1"
"google.golang.org/protobuf/proto"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/prometheus-operator/prometheus-operator/pkg/alertmanager"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/prometheus-operator/prometheus-operator/pkg/operator"
"github.com/prometheus-operator/prometheus-operator/pkg/prometheus"
testFramework "github.com/prometheus-operator/prometheus-operator/test/framework"
"github.com/kylelemons/godebug/pretty"
"github.com/pkg/errors"
)
var (
certsDir = "../../test/e2e/remote_write_certs/"
possibleErrors = map[string]string{
"bad_server_cert": "tls: bad certificate",
"bad_client_cert": "tls: failed to verify client's certificate: x509: certificate signed by unknown authority",
"no_client_cert": "tls: client didn't provide a certificate",
}
)
func createK8sResources(t *testing.T, ns, certsDir string, cKey testFramework.Key, cCert, ca testFramework.Cert) {
var clientKey, clientCert, serverKey, serverCert, caCert []byte
var err error
if cKey.Filename != "" {
clientKey, err = ioutil.ReadFile(certsDir + cKey.Filename)
if err != nil {
t.Fatalf("failed to load %s: %v", cKey.Filename, err)
}
}
if cCert.Filename != "" {
clientCert, err = ioutil.ReadFile(certsDir + cCert.Filename)
if err != nil {
t.Fatalf("failed to load %s: %v", cCert.Filename, err)
}
}
if ca.Filename != "" {
caCert, err = ioutil.ReadFile(certsDir + ca.Filename)
if err != nil {
t.Fatalf("failed to load %s: %v", ca.Filename, err)
}
}
serverKey, err = ioutil.ReadFile(certsDir + "ca.key")
if err != nil {
t.Fatalf("failed to load %s: %v", "ca.key", err)
}
serverCert, err = ioutil.ReadFile(certsDir + "ca.crt")
if err != nil {
t.Fatalf("failed to load %s: %v", "ca.crt", err)
}
scrapingKey, err := ioutil.ReadFile(certsDir + "client.key")
if err != nil {
t.Fatalf("failed to load %s: %v", "client.key", err)
}
scrapingCert, err := ioutil.ReadFile(certsDir + "client.crt")
if err != nil {
t.Fatalf("failed to load %s: %v", "client.crt", err)
}
var s *v1.Secret
var cm *v1.ConfigMap
secrets := []*v1.Secret{}
configMaps := []*v1.ConfigMap{}
s = testFramework.MakeSecretWithCert(ns, "scraping-tls",
[]string{"key.pem", "cert.pem"}, [][]byte{scrapingKey, scrapingCert})
secrets = append(secrets, s)
s = testFramework.MakeSecretWithCert(ns, "server-tls",
[]string{"key.pem", "cert.pem"}, [][]byte{serverKey, serverCert})
secrets = append(secrets, s)
if cKey.Filename != "" && cCert.Filename != "" {
s = testFramework.MakeSecretWithCert(ns, cKey.SecretName,
[]string{"key.pem"}, [][]byte{clientKey})
secrets = append(secrets, s)
if cCert.ResourceType == testFramework.SECRET {
if cCert.ResourceName == cKey.SecretName {
s.Data["cert.pem"] = clientCert
} else {
s = testFramework.MakeSecretWithCert(ns, cCert.ResourceName,
[]string{"cert.pem"}, [][]byte{clientCert})
secrets = append(secrets, s)
}
} else if cCert.ResourceType == testFramework.CONFIGMAP {
cm = testFramework.MakeConfigMapWithCert(framework.KubeClient, ns, cCert.ResourceName,
"", "cert.pem", "", nil, clientCert, nil)
configMaps = append(configMaps, cm)
} else {
t.Fatal("cert must be a Secret or a ConfigMap")
}
}
if ca.Filename != "" {
if ca.ResourceType == testFramework.SECRET {
if ca.ResourceName == cKey.SecretName {
secrets[2].Data["ca.pem"] = caCert
} else if ca.ResourceName == cCert.ResourceName {
s.Data["ca.pem"] = caCert
} else {
s = testFramework.MakeSecretWithCert(ns, ca.ResourceName,
[]string{"ca.pem"}, [][]byte{caCert})
secrets = append(secrets, s)
}
} else if ca.ResourceType == testFramework.CONFIGMAP {
if ca.ResourceName == cCert.ResourceName {
cm.Data["ca.pem"] = string(caCert)
} else {
cm = testFramework.MakeConfigMapWithCert(framework.KubeClient, ns, ca.ResourceName,
"", "", "ca.pem", nil, nil, caCert)
configMaps = append(configMaps, cm)
}
} else {
t.Fatal("cert must be a Secret or a ConfigMap")
}
}
for _, s = range secrets {
_, err := framework.KubeClient.CoreV1().Secrets(s.ObjectMeta.Namespace).Create(context.Background(), s, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
}
for _, cm = range configMaps {
_, err := framework.KubeClient.CoreV1().ConfigMaps(ns).Create(context.Background(), cm, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
}
}
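// createK8sSampleApp deploys the basic-auth sample app with the server TLS
// certificate mounted and exposes it through a Service with a plain web port
// and an mTLS port. It returns the Service cluster IP and the mTLS port.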
func createK8sSampleApp(t *testing.T, name, ns string) (string, int32) {
simple, err := testFramework.MakeDeployment("../../test/framework/resources/basic-auth-app-deployment.yaml")
if err != nil {
t.Fatal(err)
}
simple.Spec.Template.Spec.Containers[0].Args = []string{"--cert-path=/etc/certs"}
simple.Spec.Template.Spec.Volumes = []v1.Volume{
{
Name: "tls-certs",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: "server-tls",
},
},
},
}
simple.Spec.Template.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: simple.Spec.Template.Spec.Volumes[0].Name,
MountPath: "/etc/certs",
},
}
if err := framework.CreateDeployment(context.Background(), ns, simple); err != nil {
t.Fatal("Creating simple basic auth app failed: ", err)
}
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"group": name,
},
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
Ports: []v1.ServicePort{
{
Name: "web",
Port: 8080,
},
{
Name: "mtls",
Port: 8081,
},
},
Selector: map[string]string{
"group": name,
},
},
}
if _, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(err)
}
svc, err = framework.KubeClient.CoreV1().Services(ns).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
return svc.Spec.ClusterIP, svc.Spec.Ports[1].Port
}
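// createK8sAppMonitoring creates a ServiceMonitor that scrapes the sample app
// over mTLS, a Prometheus configured to remote-write to the app's TLS endpoint,
// and the corresponding Prometheus Service.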
func createK8sAppMonitoring(
name, ns string,
prwtc testFramework.PromRemoteWriteTestConfig,
svcIP string,
svcTLSPort int32,
) (*monitoringv1.Prometheus, error) {
sm := framework.MakeBasicServiceMonitor(name)
sm.Spec.Endpoints = []monitoringv1.Endpoint{
{
Port: "mtls",
Interval: "30s",
Scheme: "https",
TLSConfig: &monitoringv1.TLSConfig{
SafeTLSConfig: monitoringv1.SafeTLSConfig{
InsecureSkipVerify: true,
Cert: monitoringv1.SecretOrConfigMap{
Secret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "scraping-tls",
},
Key: "cert.pem",
},
},
KeySecret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "scraping-tls",
},
Key: "key.pem",
},
},
},
},
}
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), sm, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "creating ServiceMonitor failed")
}
prometheusCRD := framework.MakeBasicPrometheus(ns, name, name, 1)
url := "https://" + svcIP + ":" + fmt.Sprint(svcTLSPort)
framework.AddRemoteWriteWithTLSToPrometheus(prometheusCRD, url, prwtc)
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prometheusCRD); err != nil {
return nil, err
}
promSVC := framework.MakePrometheusService(prometheusCRD.Name, name, v1.ServiceTypeClusterIP)
if _, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, promSVC); err != nil {
return nil, err
}
return prometheusCRD, nil
}
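// testPromRemoteWriteWithTLS exercises Prometheus remote write over TLS with
// different combinations of client key, client certificate and CA stored in
// Secrets and ConfigMaps, and inspects the receiver's logs for the expected
// outcome of each variant.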
func testPromRemoteWriteWithTLS(t *testing.T) {
	// Keep the variant names short: a namespace name cannot exceed 63 characters.
tests := []testFramework.PromRemoteWriteTestConfig{
// working configurations
{
Name: "variant-1",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key-cert-ca",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-key-cert-ca",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-key-cert-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-2",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-cert",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-3",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key-cert",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-key-cert",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-4",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-cert-ca",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-cert-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-5",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key-ca",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-cert",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-key-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-6",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-cert-ca",
ResourceType: testFramework.CONFIGMAP,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-cert-ca",
ResourceType: testFramework.CONFIGMAP,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-7",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-cert",
ResourceType: testFramework.CONFIGMAP,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-ca",
ResourceType: testFramework.CONFIGMAP,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-8",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key-cert",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-key-cert",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-ca",
ResourceType: testFramework.CONFIGMAP,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-9",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-cert",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-ca",
ResourceType: testFramework.CONFIGMAP,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-10",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key-ca",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-cert",
ResourceType: testFramework.CONFIGMAP,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-key-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-11",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-cert",
ResourceType: testFramework.CONFIGMAP,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "",
InsecureSkipVerify: false,
ShouldSuccess: true,
},
{
Name: "variant-12",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key-cert",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-key-cert",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "",
ResourceName: "",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "",
InsecureSkipVerify: true,
ShouldSuccess: true,
},
	// Non-working configurations.
	// For simplicity these are only verified with a single resource layout (one Secret).
{
Name: "variant-13",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key-cert-ca",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-key-cert-ca",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "bad_ca.crt",
ResourceName: "client-tls-key-cert-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "bad_server_cert",
InsecureSkipVerify: false,
ShouldSuccess: false,
},
{
Name: "variant-14",
ClientKey: testFramework.Key{
Filename: "client.key",
SecretName: "client-tls-key-cert",
},
ClientCert: testFramework.Cert{
Filename: "client.crt",
ResourceName: "client-tls-key-cert",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "",
ResourceName: "",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "bad_server_cert",
InsecureSkipVerify: false,
ShouldSuccess: false,
},
{
Name: "variant-15",
ClientKey: testFramework.Key{
Filename: "bad_client.key",
SecretName: "client-tls-key-cert-ca",
},
ClientCert: testFramework.Cert{
Filename: "bad_client.crt",
ResourceName: "client-tls-key-cert-ca",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "bad_ca.crt",
ResourceName: "client-tls-key-cert-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "bad_server_cert",
InsecureSkipVerify: false,
ShouldSuccess: false,
},
{
Name: "variant-16",
ClientKey: testFramework.Key{
Filename: "bad_client.key",
SecretName: "client-tls-key-cert",
},
ClientCert: testFramework.Cert{
Filename: "bad_client.crt",
ResourceName: "client-tls-key-cert",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "",
ResourceName: "",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "bad_server_cert",
InsecureSkipVerify: false,
ShouldSuccess: false,
},
{
Name: "variant-17",
ClientKey: testFramework.Key{
Filename: "",
SecretName: "",
},
ClientCert: testFramework.Cert{
Filename: "",
ResourceName: "",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "bad_ca.crt",
ResourceName: "client-tls-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "bad_server_cert",
InsecureSkipVerify: false,
ShouldSuccess: false,
},
{
Name: "variant-18",
ClientKey: testFramework.Key{
Filename: "",
SecretName: "",
},
ClientCert: testFramework.Cert{
Filename: "",
ResourceName: "",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "",
ResourceName: "",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "bad_server_cert",
InsecureSkipVerify: false,
ShouldSuccess: false,
},
{
Name: "variant-19",
ClientKey: testFramework.Key{
Filename: "bad_client.key",
SecretName: "client-tls-key-cert-ca",
},
ClientCert: testFramework.Cert{
Filename: "bad_client.crt",
ResourceName: "client-tls-key-cert-ca",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-key-cert-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "bad_client_cert",
InsecureSkipVerify: false,
ShouldSuccess: false,
},
{
Name: "variant-20",
ClientKey: testFramework.Key{
Filename: "",
SecretName: "",
},
ClientCert: testFramework.Cert{
Filename: "",
ResourceName: "",
ResourceType: testFramework.SECRET,
},
CA: testFramework.Cert{
Filename: "ca.crt",
ResourceName: "client-tls-ca",
ResourceType: testFramework.SECRET,
},
ExpectedInLogs: "no_client_cert",
InsecureSkipVerify: false,
ShouldSuccess: false,
},
}
for _, test := range tests {
test := test
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
			// Create the Secrets/ConfigMaps holding the certificates and keys for this test case.
createK8sResources(t, ns, certsDir, test.ClientKey, test.ClientCert, test.CA)
			// Set up a sample app that supports mTLS and therefore plays two roles:
			// 1. app scraped by Prometheus
			// 2. TLS receiver for Prometheus remote write
svcIP, svcTLSPort := createK8sSampleApp(t, name, ns)
// Setup monitoring.
prometheusCRD, err := createK8sAppMonitoring(name, ns, test, svcIP, svcTLSPort)
if err != nil {
t.Fatal(err)
}
// Check for proper scraping.
promSVC := framework.MakePrometheusService(name, name, v1.ServiceTypeClusterIP)
if err := framework.WaitForHealthyTargets(context.Background(), ns, promSVC.Name, 1); err != nil {
framework.PrintPrometheusLogs(context.Background(), t, prometheusCRD)
t.Fatal(err)
}
			// TODO: replace the sleep with a polling wait; other tests show examples
			// using wait.Poll() from k8s.io/apimachinery/pkg/util/wait.
time.Sleep(45 * time.Second)
appOpts := metav1.ListOptions{
LabelSelector: fields.SelectorFromSet(fields.Set(map[string]string{
"group": name,
})).String(),
}
appPodList, err := framework.KubeClient.CoreV1().Pods(ns).List(context.Background(), appOpts)
if err != nil {
t.Fatal(err)
}
appLogs, err := framework.GetLogs(context.Background(), ns, appPodList.Items[0].ObjectMeta.Name, "")
if err != nil {
t.Fatal(err)
}
if test.ShouldSuccess {
for _, v := range possibleErrors {
if strings.Contains(appLogs, v) {
framework.PrintPrometheusLogs(context.Background(), t, prometheusCRD)
t.Fatalf("test with (%s, %s, %s) failed\nscraped app logs shouldn't contain '%s' but it does",
test.ClientKey.Filename, test.ClientCert.Filename, test.CA.Filename, v)
}
}
} else if !strings.Contains(appLogs, possibleErrors[test.ExpectedInLogs]) {
framework.PrintPrometheusLogs(context.Background(), t, prometheusCRD)
t.Fatalf("test with (%s, %s, %s) failed\nscraped app logs should contain '%s' but it doesn't",
test.ClientKey.Filename, test.ClientCert.Filename, test.CA.Filename, possibleErrors[test.ExpectedInLogs])
}
})
}
}
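// testPromCreateDeleteCluster verifies that a Prometheus resource can be
// created and then deleted cleanly.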
func testPromCreateDeleteCluster(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
prometheusCRD := framework.MakeBasicPrometheus(ns, name, name, 1)
prometheusCRD.Namespace = ns
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prometheusCRD); err != nil {
t.Fatal(err)
}
if err := framework.DeletePrometheusAndWaitUntilGone(context.Background(), ns, name); err != nil {
t.Fatal(err)
}
}
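// testPromScaleUpDownCluster scales a Prometheus from 1 to 3 replicas and back
// down to 2, waiting for readiness after each update.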
func testPromScaleUpDownCluster(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
p, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, framework.MakeBasicPrometheus(ns, name, name, 1))
if err != nil {
t.Fatal(err)
}
p.Spec.Replicas = proto.Int32(3)
p, err = framework.UpdatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
p.Spec.Replicas = proto.Int32(2)
_, err = framework.UpdatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
}
func testPromNoServiceMonitorSelector(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
p := framework.MakeBasicPrometheus(ns, name, name, 1)
p.Spec.ServiceMonitorSelector = nil
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p); err != nil {
t.Fatal(err)
}
}
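// testPromVersionMigration walks a Prometheus through every version in the
// operator's compatibility matrix and waits for it to become ready each time.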
func testPromVersionMigration(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
startVersion := operator.PrometheusCompatibilityMatrix[0]
compatibilityMatrix := operator.PrometheusCompatibilityMatrix[1:]
p := framework.MakeBasicPrometheus(ns, name, name, 1)
p.Spec.Version = startVersion
p, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
for _, v := range compatibilityMatrix {
p.Spec.Version = v
p, err = framework.UpdatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
if err := framework.WaitForPrometheusRunImageAndReady(context.Background(), ns, p); err != nil {
t.Fatal(err)
}
}
}
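// testPromResourceUpdate checks that changes to the Prometheus resource
// requests are propagated to the Prometheus container.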
func testPromResourceUpdate(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
p := framework.MakeBasicPrometheus(ns, name, name, 1)
p.Spec.Resources = v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100Mi"),
},
}
p, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
pods, err := framework.KubeClient.CoreV1().Pods(ns).List(context.Background(), prometheus.ListOptions(name))
if err != nil {
t.Fatal(err)
}
res := pods.Items[0].Spec.Containers[0].Resources
if !reflect.DeepEqual(res, p.Spec.Resources) {
t.Fatalf("resources don't match. Has %#+v, want %#+v", res, p.Spec.Resources)
}
p.Spec.Resources = v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("200Mi"),
},
}
p, err = framework.MonClientV1.Prometheuses(ns).Update(context.Background(), p, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
err = wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
pods, err := framework.KubeClient.CoreV1().Pods(ns).List(context.Background(), prometheus.ListOptions(name))
if err != nil {
return false, err
}
if len(pods.Items) != 1 {
return false, nil
}
res = pods.Items[0].Spec.Containers[0].Resources
if !reflect.DeepEqual(res, p.Spec.Resources) {
return false, nil
}
return true, nil
})
if err != nil {
t.Fatal(err)
}
}
func testPromStorageLabelsAnnotations(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
p := framework.MakeBasicPrometheus(ns, name, name, 1)
p.Spec.Storage = &monitoringv1.StorageSpec{
VolumeClaimTemplate: monitoringv1.EmbeddedPersistentVolumeClaim{
EmbeddedObjectMetadata: monitoringv1.EmbeddedObjectMetadata{
Labels: map[string]string{
"test-label": "foo",
},
Annotations: map[string]string{
"test-annotation": "bar",
},
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("200Mi"),
},
},
},
},
}
p, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
if val := p.Spec.Storage.VolumeClaimTemplate.Labels["test-label"]; val != "foo" {
t.Errorf("incorrect volume claim label, want: %v, got: %v", "foo", val)
}
if val := p.Spec.Storage.VolumeClaimTemplate.Annotations["test-annotation"]; val != "bar" {
t.Errorf("incorrect volume claim annotation, want: %v, got: %v", "bar", val)
}
err = wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
sts, err := framework.KubeClient.AppsV1().StatefulSets(ns).List(context.Background(), metav1.ListOptions{})
if err != nil {
return false, err
}
if len(sts.Items) < 1 {
return false, nil
}
for _, vct := range sts.Items[0].Spec.VolumeClaimTemplates {
if vct.Name == "prometheus-"+name+"-db" {
if val := vct.Labels["test-label"]; val != "foo" {
return false, errors.Errorf("incorrect volume claim label on sts, want: %v, got: %v", "foo", val)
}
if val := vct.Annotations["test-annotation"]; val != "bar" {
return false, errors.Errorf("incorrect volume claim annotation on sts, want: %v, got: %v", "bar", val)
}
return true, nil
}
}
return false, nil
})
if err != nil {
t.Fatal(err)
}
}
func testPromStorageUpdate(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
p := framework.MakeBasicPrometheus(ns, name, name, 1)
p, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
p.Spec.Storage = &monitoringv1.StorageSpec{
VolumeClaimTemplate: monitoringv1.EmbeddedPersistentVolumeClaim{
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("200Mi"),
},
},
},
},
}
_, err = framework.MonClientV1.Prometheuses(ns).Update(context.Background(), p, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
err = wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
pods, err := framework.KubeClient.CoreV1().Pods(ns).List(context.Background(), prometheus.ListOptions(name))
if err != nil {
return false, err
}
if len(pods.Items) != 1 {
return false, nil
}
for _, volume := range pods.Items[0].Spec.Volumes {
if volume.Name == "prometheus-"+name+"-db" && volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName != "" {
return true, nil
}
}
return false, nil
})
if err != nil {
t.Fatal(err)
}
}
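// testPromReloadConfig updates the generated configuration Secret directly and
// verifies that Prometheus reloads it and exposes the additional scrape target.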
func testPromReloadConfig(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
p := framework.MakeBasicPrometheus(ns, name, name, 1)
p.Spec.ServiceMonitorSelector = nil
p.Spec.PodMonitorSelector = nil
firstConfig := `
global:
scrape_interval: 1m
scrape_configs:
- job_name: testReloadConfig
metrics_path: /metrics
static_configs:
- targets:
- 111.111.111.111:9090
`
var bufOne bytes.Buffer
if err := gzipConfig(&bufOne, []byte(firstConfig)); err != nil {
t.Fatal(err)
}
firstConfigCompressed := bufOne.Bytes()
cfg := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("prometheus-%s", name),
},
Data: map[string][]byte{
"prometheus.yaml.gz": firstConfigCompressed,
"configmaps.json": []byte("{}"),
},
}
svc := framework.MakePrometheusService(p.Name, "not-relevant", v1.ServiceTypeClusterIP)
if _, err := framework.KubeClient.CoreV1().Secrets(ns).Create(context.Background(), cfg, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p); err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(err)
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
if err := framework.WaitForActiveTargets(context.Background(), ns, svc.Name, 1); err != nil {
t.Fatal(err)
}
secondConfig := `
global:
scrape_interval: 1m
scrape_configs:
- job_name: testReloadConfig
metrics_path: /metrics
static_configs:
- targets:
- 111.111.111.111:9090
- 111.111.111.112:9090
`
var bufTwo bytes.Buffer
if err := gzipConfig(&bufTwo, []byte(secondConfig)); err != nil {
t.Fatal(err)
}
secondConfigCompressed := bufTwo.Bytes()
cfg, err := framework.KubeClient.CoreV1().Secrets(ns).Get(context.Background(), cfg.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(errors.Wrap(err, "could not retrieve previous secret"))
}
cfg.Data["prometheus.yaml.gz"] = secondConfigCompressed
if _, err := framework.KubeClient.CoreV1().Secrets(ns).Update(context.Background(), cfg, metav1.UpdateOptions{}); err != nil {
t.Fatal(err)
}
if err := framework.WaitForActiveTargets(context.Background(), ns, svc.Name, 2); err != nil {
t.Fatal(err)
}
}
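// testPromAdditionalScrapeConfig supplies an additional scrape configuration
// via a Secret and verifies that both the ServiceMonitor target and the
// additional target become active.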
func testPromAdditionalScrapeConfig(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
prometheusName := "test"
group := "additional-config-test"
svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
s := framework.MakeBasicServiceMonitor(group)
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
additionalConfig := `
- job_name: "prometheus"
static_configs:
- targets: ["localhost:9090"]
`
secret := v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "additional-scrape-configs",
},
Data: map[string][]byte{
"prometheus-additional.yaml": []byte(additionalConfig),
},
}
_, err := framework.KubeClient.CoreV1().Secrets(ns).Create(context.Background(), &secret, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
p.Spec.AdditionalScrapeConfigs = &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "additional-scrape-configs",
},
Key: "prometheus-additional.yaml",
}
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p); err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(errors.Wrap(err, "creating prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
// Wait for ServiceMonitor target, as well as additional-config target
if err := framework.WaitForActiveTargets(context.Background(), ns, svc.Name, 2); err != nil {
t.Fatal(err)
}
}
func testPromAdditionalAlertManagerConfig(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
prometheusName := "test"
group := "additional-alert-config-test"
svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
s := framework.MakeBasicServiceMonitor(group)
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
additionalConfig := `
- path_prefix: /
scheme: http
static_configs:
- targets: ["localhost:9093"]
`
secret := v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "additional-alert-configs",
},
Data: map[string][]byte{
"prometheus-additional.yaml": []byte(additionalConfig),
},
}
_, err := framework.KubeClient.CoreV1().Secrets(ns).Create(context.Background(), &secret, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
p.Spec.AdditionalAlertManagerConfigs = &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "additional-alert-configs",
},
Key: "prometheus-additional.yaml",
}
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p); err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(errors.Wrap(err, "creating prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
// Wait for ServiceMonitor target
if err := framework.WaitForActiveTargets(context.Background(), ns, svc.Name, 1); err != nil {
t.Fatal(err)
}
err = wait.Poll(time.Second, 5*time.Minute, func() (done bool, err error) {
response, err := framework.PrometheusSVCGetRequest(context.Background(), ns, svc.Name, "/api/v1/alertmanagers", map[string]string{})
if err != nil {
return true, err
}
ra := prometheusAlertmanagerAPIResponse{}
if err := json.NewDecoder(bytes.NewBuffer(response)).Decode(&ra); err != nil {
return true, err
}
if ra.Status == "success" && len(ra.Data.ActiveAlertmanagers) == 1 {
return true, nil
}
return false, nil
})
if err != nil {
t.Fatal(errors.Wrap(err, "validating Prometheus Alertmanager configuration failed"))
}
}
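// testPromReloadRules updates a PrometheusRule object and verifies that the
// newly added alert starts firing, proving that rule changes are reloaded.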
func testPromReloadRules(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
	firstAlertName := "firstAlert"
secondAlertName := "secondAlert"
	ruleFile, err := framework.MakeAndCreateFiringRule(context.Background(), ns, name, firstAlertName)
if err != nil {
t.Fatal(err)
}
p := framework.MakeBasicPrometheus(ns, name, name, 1)
p.Spec.EvaluationInterval = "1s"
p, err = framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
pSVC := framework.MakePrometheusService(p.Name, "not-relevant", v1.ServiceTypeClusterIP)
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, pSVC); err != nil {
t.Fatal(errors.Wrap(err, "creating Prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
	err = framework.WaitForPrometheusFiringAlert(context.Background(), p.Namespace, pSVC.Name, firstAlertName)
if err != nil {
t.Fatal(err)
}
ruleFile.Spec.Groups = []monitoringv1.RuleGroup{
{
Name: "my-alerting-group",
Rules: []monitoringv1.Rule{
{
Alert: secondAlertName,
Expr: intstr.FromString("vector(1)"),
},
},
},
}
_, err = framework.UpdateRule(context.Background(), ns, ruleFile)
if err != nil {
t.Fatal(err)
}
err = framework.WaitForPrometheusFiringAlert(context.Background(), p.Namespace, pSVC.Name, secondAlertName)
if err != nil {
t.Fatal(err)
}
}
func testPromMultiplePrometheusRulesSameNS(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
alertNames := []string{"first-alert", "second-alert"}
for _, alertName := range alertNames {
_, err := framework.MakeAndCreateFiringRule(context.Background(), ns, alertName, alertName)
if err != nil {
t.Fatal(err)
}
}
p := framework.MakeBasicPrometheus(ns, name, name, 1)
p.Spec.EvaluationInterval = "1s"
p, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
pSVC := framework.MakePrometheusService(p.Name, "not-relevant", v1.ServiceTypeClusterIP)
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, pSVC); err != nil {
t.Fatal(errors.Wrap(err, "creating Prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
for _, alertName := range alertNames {
err := framework.WaitForPrometheusFiringAlert(context.Background(), p.Namespace, pSVC.Name, alertName)
if err != nil {
t.Fatal(err)
}
}
}
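// testPromMultiplePrometheusRulesDifferentNS selects PrometheusRule objects
// from other namespaces via the rule namespace selector, verifies their alerts
// fire, and checks that the rules are dropped again once the namespace labels
// are removed.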
func testPromMultiplePrometheusRulesDifferentNS(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
rootNS := framework.CreateNamespace(context.Background(), t, testCtx)
alertNSOne := framework.CreateNamespace(context.Background(), t, testCtx)
alertNSTwo := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, rootNS)
name := "test"
ruleFiles := []struct {
alertName string
ns string
}{{"first-alert", alertNSOne}, {"second-alert", alertNSTwo}}
ruleFilesNamespaceSelector := map[string]string{"monitored": "true"}
for _, file := range ruleFiles {
err := framework.AddLabelsToNamespace(context.Background(), file.ns, ruleFilesNamespaceSelector)
if err != nil {
t.Fatal(err)
}
}
for _, file := range ruleFiles {
_, err := framework.MakeAndCreateFiringRule(context.Background(), file.ns, file.alertName, file.alertName)
if err != nil {
t.Fatal(err)
}
}
p := framework.MakeBasicPrometheus(rootNS, name, name, 1)
p.Spec.EvaluationInterval = "1s"
p.Spec.RuleNamespaceSelector = &metav1.LabelSelector{
MatchLabels: ruleFilesNamespaceSelector,
}
p, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), rootNS, p)
if err != nil {
t.Fatal(err)
}
pSVC := framework.MakePrometheusService(p.Name, "not-relevant", v1.ServiceTypeClusterIP)
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), rootNS, pSVC); err != nil {
t.Fatal(errors.Wrap(err, "creating Prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
for _, file := range ruleFiles {
err := framework.WaitForPrometheusFiringAlert(context.Background(), p.Namespace, pSVC.Name, file.alertName)
if err != nil {
t.Fatal(err)
}
}
// Remove the selecting label from the namespaces holding PrometheusRules
// and wait until the rules are removed from Prometheus.
// See https://github.com/prometheus-operator/prometheus-operator/issues/3847
for _, file := range ruleFiles {
if err := framework.RemoveLabelsFromNamespace(context.Background(), file.ns, "monitored"); err != nil {
t.Fatal(err)
}
}
for _, file := range ruleFiles {
var loopError error
err = wait.Poll(time.Second, 5*framework.DefaultTimeout, func() (bool, error) {
var firing bool
firing, loopError = framework.CheckPrometheusFiringAlert(context.Background(), file.ns, pSVC.Name, file.alertName)
return !firing, nil
})
if err != nil {
t.Fatalf("waiting for alert %q in namespace %s to stop firing: %v: %v", file.alertName, file.ns, err, loopError)
}
}
}
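// testPromRulesExceedingConfigMapLimit creates rules too large for a single
// ConfigMap and verifies that the operator spreads them across multiple rule
// ConfigMaps and cleans them up when rules are deleted.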
func testPromRulesExceedingConfigMapLimit(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
prometheusRules := []*monitoringv1.PrometheusRule{}
for i := 0; i < 2; i++ {
rule := generateHugePrometheusRule(ns, strconv.Itoa(i))
rule, err := framework.CreateRule(context.Background(), ns, rule)
if err != nil {
t.Fatal(err)
}
prometheusRules = append(prometheusRules, rule)
}
name := "test"
p := framework.MakeBasicPrometheus(ns, name, name, 1)
p.Spec.EvaluationInterval = "1s"
p, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
defer func() {
if t.Failed() {
if err := framework.PrintPodLogs(context.Background(), ns, "prometheus-"+p.Name+"-0"); err != nil {
t.Fatal(err)
}
}
}()
pSVC := framework.MakePrometheusService(p.Name, "not-relevant", v1.ServiceTypeClusterIP)
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, pSVC); err != nil {
t.Fatal(errors.Wrap(err, "creating Prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
for i := range prometheusRules {
_, err := framework.WaitForConfigMapExist(context.Background(), ns, "prometheus-"+p.Name+"-rulefiles-"+strconv.Itoa(i))
if err != nil {
t.Fatal(err)
}
}
// Make sure both rule files ended up in the Prometheus Pod
for i := range prometheusRules {
err := framework.WaitForPrometheusFiringAlert(context.Background(), ns, pSVC.Name, "my-alert-"+strconv.Itoa(i))
if err != nil {
t.Fatal(err)
}
}
err = framework.DeleteRule(context.Background(), ns, prometheusRules[1].Name)
if err != nil {
t.Fatal(err)
}
_, err = framework.WaitForConfigMapExist(context.Background(), ns, "prometheus-"+p.Name+"-rulefiles-0")
if err != nil {
t.Fatal(err)
}
err = framework.WaitForConfigMapNotExist(context.Background(), ns, "prometheus-"+p.Name+"-rulefiles-1")
if err != nil {
t.Fatal(err)
}
err = framework.WaitForPrometheusFiringAlert(context.Background(), ns, pSVC.Name, "my-alert-0")
if err != nil {
t.Fatal(err)
}
}
func testPromRulesMustBeAnnotated(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "admission"
admissionAlert := "admissionAlert"
_, err := framework.MakeAndCreateFiringRule(context.Background(), ns, name, admissionAlert)
if err != nil {
t.Fatal(err)
}
rule, err := framework.GetRule(context.Background(), ns, name)
if err != nil {
t.Fatal(err)
}
val, ok := rule.Annotations["prometheus-operator-validated"]
if !ok {
t.Fatal("Expected prometheusrule to be annotated")
}
if val != "true" {
t.Fatal("Expected prometheusrule annotation to be 'true'")
}
}
func testInvalidRulesAreRejected(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "admission"
admissionAlert := "admissionAlert"
_, err := framework.MakeAndCreateInvalidRule(context.Background(), ns, name, admissionAlert)
if err == nil {
t.Fatal("Expected invalid prometheusrule to be rejected")
}
}
// generateHugePrometheusRule returns a Prometheus rule instance that would fill
// more than half of the space of a Kubernetes ConfigMap.
func generateHugePrometheusRule(ns, identifier string) *monitoringv1.PrometheusRule {
alertName := "my-alert"
groups := []monitoringv1.RuleGroup{
{
Name: alertName,
Rules: []monitoringv1.Rule{},
},
}
// One rule marshaled as yaml is ~34 bytes long, the max is ~524288 bytes.
for i := 0; i < 12000; i++ {
groups[0].Rules = append(groups[0].Rules, monitoringv1.Rule{
Alert: alertName + "-" + identifier,
Expr: intstr.FromString("vector(1)"),
})
}
rule := framework.MakeBasicRule(ns, "prometheus-rule-"+identifier, groups)
return rule
}
// Make sure the Prometheus operator only updates the Prometheus config secret
// and the Prometheus rules configmap on relevant changes
func testPromOnlyUpdatedOnRelevantChanges(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
prometheus := framework.MakeBasicPrometheus(ns, name, name, 1)
	// Adding an annotation to the Prometheus object led to high CPU usage in the past
	// because the operator kept updating the Prometheus StatefulSet in a loop (see
	// https://github.com/prometheus-operator/prometheus-operator/issues/1659). It is
	// added here to prevent a regression.
prometheus.Annotations["test-annotation"] = "test-value"
ctx, cancel := context.WithCancel(context.Background())
type versionedResource interface {
GetResourceVersion() string
}
resourceDefinitions := []struct {
Name string
Getter func(prometheusName string) (versionedResource, error)
Versions map[string]interface{}
MaxExpectedChanges int
}{
{
Name: "prometheus",
Getter: func(prometheusName string) (versionedResource, error) {
return framework.
MonClientV1.
Prometheuses(ns).
Get(context.Background(), prometheusName, metav1.GetOptions{})
},
MaxExpectedChanges: 1,
},
{
Name: "rulesConfigMap",
Getter: func(prometheusName string) (versionedResource, error) {
return framework.
KubeClient.
CoreV1().
ConfigMaps(ns).
Get(context.Background(), "prometheus-"+prometheusName+"-rulefiles-0", metav1.GetOptions{})
},
// The Prometheus Operator first creates the ConfigMap for the
// given Prometheus stateful set and then updates it with the matching
// Prometheus rules.
MaxExpectedChanges: 2,
},
{
Name: "configurationSecret",
Getter: func(prometheusName string) (versionedResource, error) {
return framework.
KubeClient.
CoreV1().
Secrets(ns).
Get(context.Background(), "prometheus-"+prometheusName, metav1.GetOptions{})
},
MaxExpectedChanges: 2,
},
{
Name: "tlsAssetSecret",
Getter: func(prometheusName string) (versionedResource, error) {
return framework.
KubeClient.
CoreV1().
Secrets(ns).
Get(context.Background(), "prometheus-"+prometheusName+"-tls-assets", metav1.GetOptions{})
},
MaxExpectedChanges: 2,
},
{
Name: "statefulset",
Getter: func(prometheusName string) (versionedResource, error) {
return framework.
KubeClient.
AppsV1().
StatefulSets(ns).
Get(context.Background(), "prometheus-"+prometheusName, metav1.GetOptions{})
},
		// The first change is the creation of the StatefulSet itself; later
		// changes are status updates, e.g. to the ReadyReplicas field.
MaxExpectedChanges: 3,
},
{
Name: "service-operated",
Getter: func(prometheusName string) (versionedResource, error) {
return framework.
KubeClient.
CoreV1().
Services(ns).
Get(context.Background(), "prometheus-operated", metav1.GetOptions{})
},
MaxExpectedChanges: 1,
},
{
Name: "serviceMonitor",
Getter: func(prometheusName string) (versionedResource, error) {
return framework.
MonClientV1.
ServiceMonitors(ns).
Get(context.Background(), prometheusName, metav1.GetOptions{})
},
MaxExpectedChanges: 1,
},
}
// Init Versions maps
for i := range resourceDefinitions {
resourceDefinitions[i].Versions = map[string]interface{}{}
}
errc := make(chan error, 1)
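	// Watch all relevant resources in the background and record every distinct
	// resource version that is observed.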
go func() {
for {
select {
case <-ctx.Done():
return
default:
time.Sleep(10 * time.Millisecond)
for i, resourceDef := range resourceDefinitions {
resource, err := resourceDef.Getter(prometheus.Name)
if apierrors.IsNotFound(err) {
continue
}
if err != nil {
cancel()
errc <- err
return
}
resourceDefinitions[i].Versions[resource.GetResourceVersion()] = resource
}
}
}
}()
alertName := "my-alert"
if _, err := framework.MakeAndCreateFiringRule(context.Background(), ns, "my-prometheus-rule", alertName); err != nil {
t.Fatal(err)
}
prometheus, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prometheus)
if err != nil {
t.Fatal(err)
}
pSVC := framework.MakePrometheusService(prometheus.Name, name, v1.ServiceTypeClusterIP)
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, pSVC); err != nil {
t.Fatal(errors.Wrap(err, "creating Prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
s := framework.MakeBasicServiceMonitor(name)
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
err = framework.WaitForPrometheusFiringAlert(context.Background(), prometheus.Namespace, pSVC.Name, alertName)
if err != nil {
t.Fatal(err)
}
err = framework.WaitForDiscoveryWorking(context.Background(), ns, pSVC.Name, prometheus.Name)
if err != nil {
t.Fatal(errors.Wrap(err, "validating Prometheus target discovery failed"))
}
if err := framework.DeletePrometheusAndWaitUntilGone(context.Background(), ns, name); err != nil {
t.Fatal(err)
}
cancel()
select {
case err := <-errc:
t.Fatal(err)
default:
}
for _, resource := range resourceDefinitions {
if len(resource.Versions) > resource.MaxExpectedChanges || len(resource.Versions) < 1 {
var previous interface{}
for _, version := range resource.Versions {
if previous == nil {
previous = version
continue
}
fmt.Println(pretty.Compare(previous, version))
previous = version
}
t.Fatalf(
"expected resource %v to be created/updated %v times, but saw %v instead",
resource.Name,
resource.MaxExpectedChanges,
len(resource.Versions),
)
}
}
}
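// testPromPreserveUserAddedMetadata adds user-defined labels and annotations to
// operator-managed resources and verifies that they survive a reconciliation.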
func testPromPreserveUserAddedMetadata(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
prometheusCRD := framework.MakeBasicPrometheus(ns, name, name, 1)
prometheusCRD.Namespace = ns
prometheusCRD, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prometheusCRD)
if err != nil {
t.Fatal(err)
}
updatedLabels := map[string]string{
"user-defined-label": "custom-label-value",
}
updatedAnnotations := map[string]string{
"user-defined-annotation": "custom-annotation-val",
}
svcClient := framework.KubeClient.CoreV1().Services(ns)
endpointsClient := framework.KubeClient.CoreV1().Endpoints(ns)
ssetClient := framework.KubeClient.AppsV1().StatefulSets(ns)
secretClient := framework.KubeClient.CoreV1().Secrets(ns)
resourceConfigs := []struct {
name string
get func() (metav1.Object, error)
update func(object metav1.Object) (metav1.Object, error)
}{
{
name: "prometheus-operated service",
get: func() (metav1.Object, error) {
return svcClient.Get(context.Background(), "prometheus-operated", metav1.GetOptions{})
},
update: func(object metav1.Object) (metav1.Object, error) {
return svcClient.Update(context.Background(), asService(t, object), metav1.UpdateOptions{})
},
},
{
name: "prometheus stateful set",
get: func() (metav1.Object, error) {
return ssetClient.Get(context.Background(), "prometheus-test", metav1.GetOptions{})
},
update: func(object metav1.Object) (metav1.Object, error) {
return ssetClient.Update(context.Background(), asStatefulSet(t, object), metav1.UpdateOptions{})
},
},
{
name: "prometheus-operated endpoints",
get: func() (metav1.Object, error) {
return endpointsClient.Get(context.Background(), "prometheus-operated", metav1.GetOptions{})
},
update: func(object metav1.Object) (metav1.Object, error) {
return endpointsClient.Update(context.Background(), asEndpoints(t, object), metav1.UpdateOptions{})
},
},
{
name: "prometheus secret",
get: func() (metav1.Object, error) {
return secretClient.Get(context.Background(), "prometheus-test", metav1.GetOptions{})
},
update: func(object metav1.Object) (metav1.Object, error) {
return secretClient.Update(context.Background(), asSecret(t, object), metav1.UpdateOptions{})
},
},
}
for _, rConf := range resourceConfigs {
res, err := rConf.get()
if err != nil {
t.Fatal(err)
}
updateObjectLabels(res, updatedLabels)
updateObjectAnnotations(res, updatedAnnotations)
_, err = rConf.update(res)
if err != nil {
t.Fatal(err)
}
}
	// Update the Prometheus object to trigger a reconciliation of the managed resources.
prometheusCRD.Spec.Replicas = proto.Int32(2)
_, err = framework.UpdatePrometheusAndWaitUntilReady(context.Background(), ns, prometheusCRD)
if err != nil {
t.Fatal(err)
}
// Assert labels preserved
for _, rConf := range resourceConfigs {
res, err := rConf.get()
if err != nil {
t.Fatal(err)
}
labels := res.GetLabels()
if !containsValues(labels, updatedLabels) {
t.Errorf("%s: labels do not contain updated labels, found: %q, should contain: %q", rConf.name, labels, updatedLabels)
}
annotations := res.GetAnnotations()
if !containsValues(annotations, updatedAnnotations) {
t.Fatalf("%s: annotations do not contain updated annotations, found: %q, should contain: %q", rConf.name, annotations, updatedAnnotations)
}
}
// Cleanup
if err := framework.DeletePrometheusAndWaitUntilGone(context.Background(), ns, name); err != nil {
t.Fatal(err)
}
}
func asService(t *testing.T, object metav1.Object) *v1.Service {
svc, ok := object.(*v1.Service)
if !ok {
t.Fatalf("expected service got %T", object)
}
return svc
}
func asEndpoints(t *testing.T, object metav1.Object) *v1.Endpoints {
endpoints, ok := object.(*v1.Endpoints)
if !ok {
t.Fatalf("expected endpoints got %T", object)
}
return endpoints
}
func asStatefulSet(t *testing.T, object metav1.Object) *appsv1.StatefulSet {
sset, ok := object.(*appsv1.StatefulSet)
if !ok {
t.Fatalf("expected stateful set got %T", object)
}
return sset
}
func asSecret(t *testing.T, object metav1.Object) *v1.Secret {
sec, ok := object.(*v1.Secret)
if !ok {
t.Fatalf("expected secret set got %T", object)
}
return sec
}
func containsValues(got, expected map[string]string) bool {
for k, v := range expected {
if got[k] != v {
return false
}
}
return true
}
func updateObjectLabels(object metav1.Object, labels map[string]string) {
current := object.GetLabels()
current = mergeMap(current, labels)
object.SetLabels(current)
}
func updateObjectAnnotations(object metav1.Object, annotations map[string]string) {
current := object.GetAnnotations()
current = mergeMap(current, annotations)
object.SetAnnotations(current)
}
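// mergeMap copies all entries of b into a (allocating a if nil) and returns the result.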
func mergeMap(a, b map[string]string) map[string]string {
if a == nil {
a = make(map[string]string, len(b))
}
for k, v := range b {
a[k] = v
}
return a
}
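// testPromWhenDeleteCRDCleanUpViaOwnerRef verifies that owned resources such as
// the rulefiles ConfigMap are garbage-collected once the Prometheus object is deleted.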
func testPromWhenDeleteCRDCleanUpViaOwnerRef(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
p := framework.MakeBasicPrometheus(ns, name, name, 1)
p, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
configMapName := fmt.Sprintf("prometheus-%v-rulefiles-0", p.Name)
_, err = framework.WaitForConfigMapExist(context.Background(), ns, configMapName)
if err != nil {
t.Fatal(err)
}
	// Wait for the Prometheus pods to vanish.
err = framework.DeletePrometheusAndWaitUntilGone(context.Background(), ns, p.Name)
if err != nil {
t.Fatal(err)
}
err = framework.WaitForConfigMapNotExist(context.Background(), ns, configMapName)
if err != nil {
t.Fatal(err)
}
}
func testPromDiscovery(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
prometheusName := "test"
group := "servicediscovery-test"
svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
s := framework.MakeBasicServiceMonitor(group)
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
_, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(errors.Wrap(err, "creating prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
_, err = framework.KubeClient.CoreV1().Secrets(ns).Get(context.Background(), fmt.Sprintf("prometheus-%s", prometheusName), metav1.GetOptions{})
if err != nil {
t.Fatal("Generated Secret could not be retrieved: ", err)
}
err = framework.WaitForDiscoveryWorking(context.Background(), ns, svc.Name, prometheusName)
if err != nil {
t.Fatal(errors.Wrap(err, "validating Prometheus target discovery failed"))
}
}
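// testPromSharedResourcesReconciliation verifies that a single ServiceMonitor
// shared by two Prometheus instances is reconciled into both, and that deleting
// it removes the targets from both.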
func testPromSharedResourcesReconciliation(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
s := framework.MakeBasicServiceMonitor("reconcile-test")
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatalf("Creating ServiceMonitor failed: %v", err)
}
	// Create 2 different Prometheus instances that watch the ServiceMonitor created above.
for _, prometheusName := range []string{"test", "test2"} {
p := framework.MakeBasicPrometheus(ns, prometheusName, "reconcile-test", 1)
_, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
svc := framework.MakePrometheusService(prometheusName, fmt.Sprintf("reconcile-%s", prometheusName), v1.ServiceTypeClusterIP)
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(err)
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
_, err = framework.KubeClient.CoreV1().Secrets(ns).Get(context.Background(), fmt.Sprintf("prometheus-%s", prometheusName), metav1.GetOptions{})
if err != nil {
t.Fatalf("Generated Secret could not be retrieved for %s: %v", prometheusName, err)
}
err = framework.WaitForActiveTargets(context.Background(), ns, svc.Name, 1)
if err != nil {
t.Fatalf("Validating Prometheus active targets failed for %s: %v", prometheusName, err)
}
}
if err := framework.MonClientV1.ServiceMonitors(ns).Delete(context.Background(), "reconcile-test", metav1.DeleteOptions{}); err != nil {
t.Fatalf("Deleting ServiceMonitor failed: %v", err)
}
	// The ServiceMonitor has been deleted; check that both Prometheus instances drop their targets.
for _, prometheusName := range []string{"test", "test2"} {
svc := framework.MakePrometheusService(prometheusName, fmt.Sprintf("reconcile-%s", prometheusName), v1.ServiceTypeClusterIP)
if err := framework.WaitForActiveTargets(context.Background(), ns, svc.Name, 0); err != nil {
t.Fatalf("Validating Prometheus active targets failed for %s: %v", prometheusName, err)
}
}
}
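// testShardingProvisioning creates a Prometheus with two shards and checks that
// each shard's generated configuration contains the expected __tmp_hash keep
// relabeling for its shard index.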
func testShardingProvisioning(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
prometheusName := "test"
group := "servicediscovery-test"
svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
s := framework.MakeBasicServiceMonitor(group)
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
shards := int32(2)
p.Spec.Shards = &shards
_, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(errors.Wrap(err, "creating prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
pods := []struct {
pod string
expectedShardConfigSnippet string
}{
{
pod: "prometheus-test-0",
expectedShardConfigSnippet: `
- source_labels:
- __tmp_hash
regex: 0
action: keep`,
}, {
pod: "prometheus-test-shard-1-0",
expectedShardConfigSnippet: `
- source_labels:
- __tmp_hash
regex: 1
action: keep`,
},
}
for _, p := range pods {
stdout, _, err := framework.ExecWithOptions(testFramework.ExecOptions{
Command: []string{
"/bin/sh", "-c", "cat /etc/prometheus/config_out/prometheus.env.yaml",
},
Namespace: ns,
PodName: p.pod,
ContainerName: "prometheus",
CaptureStdout: true,
CaptureStderr: true,
Stdin: nil,
})
if err != nil {
t.Fatalf("Failed to read config from pod %q: %v", p.pod, err)
}
if !strings.Contains(stdout, p.expectedShardConfigSnippet) {
t.Fatalf("Expected shard config to be present for %v but not found in config:\n\n%s\n\nexpected to find:\n\n%s", p.pod, stdout, p.expectedShardConfigSnippet)
}
}
}
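// testResharding scales a Prometheus from one shard to two and back, verifying
// that the per-shard StatefulSets are created and removed accordingly.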
func testResharding(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
prometheusName := "test"
group := "servicediscovery-test"
svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
s := framework.MakeBasicServiceMonitor(group)
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
p, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(errors.Wrap(err, "creating prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
shards := int32(2)
p.Spec.Shards = &shards
p, err = framework.UpdatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
_, err = framework.KubeClient.AppsV1().StatefulSets(ns).Get(context.Background(), fmt.Sprintf("prometheus-%s", p.Name), metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
_, err = framework.KubeClient.AppsV1().StatefulSets(ns).Get(context.Background(), fmt.Sprintf("prometheus-%s-shard-1", p.Name), metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
shards = int32(1)
p.Spec.Shards = &shards
p, err = framework.UpdatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
_, err = framework.KubeClient.AppsV1().StatefulSets(ns).Get(context.Background(), fmt.Sprintf("prometheus-%s", p.Name), metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
err = wait.Poll(time.Second, 1*time.Minute, func() (bool, error) {
_, err = framework.KubeClient.AppsV1().StatefulSets(ns).Get(context.Background(), fmt.Sprintf("prometheus-%s-shard-1", p.Name), metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
if err == nil {
// StatefulSet still exists.
return false, nil
}
// StatefulSet not found.
return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
func testPromAlertmanagerDiscovery(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
prometheusName := "test"
alertmanagerName := "test"
group := "servicediscovery-test"
svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
amsvc := framework.MakeAlertmanagerService(alertmanagerName, group, v1.ServiceTypeClusterIP)
p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
framework.AddAlertingToPrometheus(p, ns, alertmanagerName)
_, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(errors.Wrap(err, "creating Prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
s := framework.MakeBasicServiceMonitor(group)
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatalf("Creating ServiceMonitor failed: %v", err)
}
_, err = framework.KubeClient.CoreV1().Secrets(ns).Get(context.Background(), fmt.Sprintf("prometheus-%s", prometheusName), metav1.GetOptions{})
if err != nil {
t.Fatalf("Generated Secret could not be retrieved: %v", err)
}
if _, err := framework.CreateAlertmanagerAndWaitUntilReady(context.Background(), ns, framework.MakeBasicAlertmanager(alertmanagerName, 3)); err != nil {
t.Fatal(err)
}
if _, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, amsvc); err != nil {
t.Fatal(errors.Wrap(err, "creating Alertmanager service failed"))
}
err = wait.Poll(time.Second, 18*time.Minute, isAlertmanagerDiscoveryWorking(context.Background(), ns, svc.Name, alertmanagerName))
if err != nil {
t.Fatal(errors.Wrap(err, "validating Prometheus Alertmanager discovery failed"))
}
}
func testPromExposingWithKubernetesAPI(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
basicPrometheus := framework.MakeBasicPrometheus(ns, "basic-prometheus", "test-group", 1)
service := framework.MakePrometheusService(basicPrometheus.Name, "test-group", v1.ServiceTypeClusterIP)
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, basicPrometheus); err != nil {
t.Fatal("Creating prometheus failed: ", err)
}
if _, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, service); err != nil {
t.Fatal("Creating prometheus service failed: ", err)
}
ProxyGet := framework.KubeClient.CoreV1().Services(ns).ProxyGet
request := ProxyGet("", service.Name, "web", "/metrics", make(map[string]string))
_, err := request.DoRaw(context.Background())
if err != nil {
t.Fatal(err)
}
}
func testPromDiscoverTargetPort(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
prometheusName := "test"
group := "servicediscovery-test"
svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
targetPort := intstr.FromInt(9090)
sm := &monitoringv1.ServiceMonitor{
ObjectMeta: metav1.ObjectMeta{
Name: prometheusName,
Labels: map[string]string{
"group": group,
},
},
Spec: monitoringv1.ServiceMonitorSpec{
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{
"group": group,
},
},
Endpoints: []monitoringv1.Endpoint{
{
TargetPort: &targetPort,
Interval: "30s",
},
},
},
}
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), sm, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p); err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(errors.Wrap(err, "creating prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
_, err := framework.KubeClient.CoreV1().Secrets(ns).Get(context.Background(), fmt.Sprintf("prometheus-%s", prometheusName), metav1.GetOptions{})
if err != nil {
t.Fatal("Generated Secret could not be retrieved: ", err)
}
err = framework.WaitForDiscoveryWorking(context.Background(), ns, svc.Name, prometheusName)
if err != nil {
t.Fatal(errors.Wrap(err, "validating Prometheus target discovery failed"))
}
}
func testPromOpMatchPromAndServMonInDiffNSs(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
prometheusNSName := framework.CreateNamespace(context.Background(), t, testCtx)
serviceMonitorNSName := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, prometheusNSName)
if err := framework.AddLabelsToNamespace(
context.Background(),
serviceMonitorNSName,
map[string]string{"team": "frontend"},
); err != nil {
t.Fatal(err)
}
group := "sample-app"
prometheusJobName := serviceMonitorNSName + "/" + group
prometheusName := "test"
svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
s := framework.MakeBasicServiceMonitor(group)
if _, err := framework.MonClientV1.ServiceMonitors(serviceMonitorNSName).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
p := framework.MakeBasicPrometheus(prometheusNSName, prometheusName, group, 1)
p.Spec.ServiceMonitorNamespaceSelector = &metav1.LabelSelector{
MatchLabels: map[string]string{
"team": "frontend",
},
}
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), prometheusNSName, p); err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), prometheusNSName, svc); err != nil {
t.Fatal(errors.Wrap(err, "creating prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
resp, err := framework.PrometheusSVCGetRequest(context.Background(), prometheusNSName, svc.Name, "/api/v1/status/config", map[string]string{})
if err != nil {
t.Fatal(err)
}
if strings.Count(string(resp), prometheusJobName) != 1 {
t.Fatalf("expected Prometheus operator to configure Prometheus in ns '%v' to scrape the service monitor in ns '%v'", prometheusNSName, serviceMonitorNSName)
}
}
func testThanos(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
version := operator.DefaultThanosVersion
prom := framework.MakeBasicPrometheus(ns, "basic-prometheus", "test-group", 1)
prom.Spec.Replicas = proto.Int32(2)
prom.Spec.Thanos = &monitoringv1.ThanosSpec{
Version: &version,
}
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prom); err != nil {
t.Fatal("Creating prometheus failed: ", err)
}
promSvc := framework.MakePrometheusService(prom.Name, "test-group", v1.ServiceTypeClusterIP)
if _, err := framework.KubeClient.CoreV1().Services(ns).Create(context.Background(), promSvc, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating prometheus service failed: ", err)
}
svcMon := framework.MakeBasicServiceMonitor("test-group")
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), svcMon, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
qryDep, err := testFramework.MakeDeployment("../../example/thanos/query-deployment.yaml")
if err != nil {
t.Fatal("Making thanos query deployment failed: ", err)
}
// override image
qryImage := "quay.io/thanos/thanos:" + version
t.Log("setting up query with image: ", qryImage)
qryDep.Spec.Template.Spec.Containers[0].Image = qryImage
// override args
qryArgs := []string{
"query",
"--log.level=debug",
"--query.replica-label=prometheus_replica",
fmt.Sprintf("--store=dnssrv+_grpc._tcp.prometheus-operated.%s.svc.cluster.local", ns),
}
t.Log("setting up query with args: ", qryArgs)
qryDep.Spec.Template.Spec.Containers[0].Args = qryArgs
if err := framework.CreateDeployment(context.Background(), ns, qryDep); err != nil {
t.Fatal("Creating Thanos query deployment failed: ", err)
}
qrySvc := framework.MakeThanosQuerierService(qryDep.Name)
if _, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, qrySvc); err != nil {
t.Fatal("Creating Thanos query service failed: ", err)
}
err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {
proxyGet := framework.KubeClient.CoreV1().Services(ns).ProxyGet
request := proxyGet("http", qrySvc.Name, "http-query", "/api/v1/query", map[string]string{
"query": "prometheus_build_info",
"dedup": "false",
})
b, err := request.DoRaw(context.Background())
if err != nil {
t.Logf("Error performing request against Thanos query: %v\n\nretrying...", err)
return false, nil
}
d := struct {
Data struct {
Result []map[string]interface{} `json:"result"`
} `json:"data"`
}{}
err = json.Unmarshal(b, &d)
if err != nil {
return false, err
}
result := len(d.Data.Result)
// We're expecting 4 results as we are requesting the
// `prometheus_build_info` metric, which is collected for both
// Prometheus replicas by both replicas.
expected := 4
if result != expected {
t.Logf("Unexpected number of results from query. Got %d, expected %d. retrying...", result, expected)
return false, nil
}
return true, nil
})
if err != nil {
t.Fatal("Failed to get correct result from Thanos query: ", err)
}
}
func testPromGetAuthSecret(t *testing.T) {
t.Parallel()
name := "test"
tests := []struct {
name string
secret *v1.Secret
serviceMonitor func() *monitoringv1.ServiceMonitor
}{
{
name: "basic-auth",
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string][]byte{
"user": []byte("user"),
"password": []byte("pass"),
},
},
serviceMonitor: func() *monitoringv1.ServiceMonitor {
sm := framework.MakeBasicServiceMonitor(name)
sm.Spec.Endpoints[0].BasicAuth = &monitoringv1.BasicAuth{
Username: v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "user",
},
Password: v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "password",
},
}
return sm
},
},
{
name: "bearer-token",
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string][]byte{
"bearertoken": []byte("abc"),
},
},
serviceMonitor: func() *monitoringv1.ServiceMonitor {
sm := framework.MakeBasicServiceMonitor(name)
sm.Spec.Endpoints[0].BearerTokenSecret = v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "bearertoken",
}
sm.Spec.Endpoints[0].Path = "/bearer-metrics"
return sm
},
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBACGlobal(context.Background(), t, testCtx, ns)
maptest := make(map[string]string)
maptest["tc"] = ns
prometheusCRD := framework.MakeBasicPrometheus(ns, name, name, 1)
prometheusCRD.Spec.ServiceMonitorNamespaceSelector = &metav1.LabelSelector{
MatchLabels: maptest,
}
prometheusCRD.Spec.ScrapeInterval = "1s"
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prometheusCRD); err != nil {
t.Fatal(err)
}
testNamespace := framework.CreateNamespace(context.Background(), t, testCtx)
err := framework.AddLabelsToNamespace(context.Background(), testNamespace, maptest)
if err != nil {
t.Fatal(err)
}
simple, err := testFramework.MakeDeployment("../../test/framework/resources/basic-auth-app-deployment.yaml")
if err != nil {
t.Fatal(err)
}
if err := framework.CreateDeployment(context.Background(), testNamespace, simple); err != nil {
t.Fatal("Creating simple basic auth app failed: ", err)
}
authSecret := test.secret
if _, err := framework.KubeClient.CoreV1().Secrets(testNamespace).Create(context.Background(), authSecret, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"group": name,
},
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
Ports: []v1.ServicePort{
{
Name: "web",
Port: 8080,
},
},
Selector: map[string]string{
"group": name,
},
},
}
sm := test.serviceMonitor()
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), testNamespace, svc); err != nil {
t.Fatal(err)
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
if _, err := framework.MonClientV1.ServiceMonitors(testNamespace).Create(context.Background(), sm, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
if err := framework.WaitForHealthyTargets(context.Background(), ns, "prometheus-operated", 1); err != nil {
t.Fatal(err)
}
})
}
}
// testOperatorNSScope tests the multi namespace feature of the Prometheus Operator.
// It checks whether it ignores rules that are not in the watched namespaces of the
// Prometheus Operator. The Prometheus Operator internally treats watching a
// single namespace different than watching multiple namespaces, hence the two
// sub-tests.
func testOperatorNSScope(t *testing.T) {
name := "test"
firtAlertName := "firstAlert"
secondAlertName := "secondAlert"
t.Run("SingleNS", func(t *testing.T) {
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
operatorNS := framework.CreateNamespace(context.Background(), t, testCtx)
mainNS := framework.CreateNamespace(context.Background(), t, testCtx)
arbitraryNS := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, mainNS)
prometheusNamespaceSelector := map[string]string{"prometheus": mainNS}
// Add labels to namespaces for Prometheus RuleNamespaceSelector.
for _, ns := range []string{mainNS, arbitraryNS} {
err := framework.AddLabelsToNamespace(context.Background(), ns, prometheusNamespaceSelector)
if err != nil {
t.Fatal(err)
}
}
// Prometheus Operator only watches single namespace mainNS, not arbitraryNS.
_, err := framework.CreatePrometheusOperator(context.Background(), operatorNS, *opImage, []string{mainNS}, nil, nil, nil, false, true)
if err != nil {
t.Fatal(err)
}
ruleDef := []struct {
NSName string
AlertName string
}{{arbitraryNS, secondAlertName}, {mainNS, firtAlertName}}
for _, r := range ruleDef {
_, err := framework.MakeAndCreateFiringRule(context.Background(), r.NSName, name, r.AlertName)
if err != nil {
t.Fatal(err)
}
}
p := framework.MakeBasicPrometheus(mainNS, name, name, 1)
p.Spec.RuleNamespaceSelector = &metav1.LabelSelector{
MatchLabels: prometheusNamespaceSelector,
}
p.Spec.EvaluationInterval = "1s"
p, err = framework.CreatePrometheusAndWaitUntilReady(context.Background(), mainNS, p)
if err != nil {
t.Fatal(err)
}
pSVC := framework.MakePrometheusService(p.Name, "not-relevant", v1.ServiceTypeClusterIP)
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), mainNS, pSVC); err != nil {
t.Fatal(errors.Wrap(err, "creating Prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
err = framework.WaitForPrometheusFiringAlert(context.Background(), p.Namespace, pSVC.Name, firtAlertName)
if err != nil {
t.Fatal(err)
}
firing, err := framework.CheckPrometheusFiringAlert(context.Background(), p.Namespace, pSVC.Name, secondAlertName)
if err != nil && !strings.Contains(err.Error(), "expected 1 query result but got 0") {
t.Fatal(err)
}
if firing {
t.Fatalf("expected alert %q not to fire", secondAlertName)
}
})
t.Run("MultiNS", func(t *testing.T) {
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
operatorNS := framework.CreateNamespace(context.Background(), t, testCtx)
prometheusNS := framework.CreateNamespace(context.Background(), t, testCtx)
ruleNS := framework.CreateNamespace(context.Background(), t, testCtx)
arbitraryNS := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, prometheusNS)
prometheusNamespaceSelector := map[string]string{"prometheus": prometheusNS}
for _, ns := range []string{ruleNS, arbitraryNS} {
err := framework.AddLabelsToNamespace(context.Background(), ns, prometheusNamespaceSelector)
if err != nil {
t.Fatal(err)
}
}
// Prometheus Operator only watches prometheusNS and ruleNS, not arbitraryNS.
_, err := framework.CreatePrometheusOperator(context.Background(), operatorNS, *opImage, []string{prometheusNS, ruleNS}, nil, nil, nil, false, true)
if err != nil {
t.Fatal(err)
}
ruleDef := []struct {
NSName string
AlertName string
}{{arbitraryNS, secondAlertName}, {ruleNS, firtAlertName}}
for _, r := range ruleDef {
_, err := framework.MakeAndCreateFiringRule(context.Background(), r.NSName, name, r.AlertName)
if err != nil {
t.Fatal(err)
}
}
p := framework.MakeBasicPrometheus(prometheusNS, name, name, 1)
p.Spec.RuleNamespaceSelector = &metav1.LabelSelector{
MatchLabels: prometheusNamespaceSelector,
}
p.Spec.EvaluationInterval = "1s"
p, err = framework.CreatePrometheusAndWaitUntilReady(context.Background(), prometheusNS, p)
if err != nil {
t.Fatal(err)
}
pSVC := framework.MakePrometheusService(p.Name, "not-relevant", v1.ServiceTypeClusterIP)
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), prometheusNS, pSVC); err != nil {
t.Fatal(errors.Wrap(err, "creating Prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
err = framework.WaitForPrometheusFiringAlert(context.Background(), p.Namespace, pSVC.Name, firtAlertName)
if err != nil {
t.Fatal(err)
}
firing, err := framework.CheckPrometheusFiringAlert(context.Background(), p.Namespace, pSVC.Name, secondAlertName)
if err != nil && !strings.Contains(err.Error(), "expected 1 query result but got 0") {
t.Fatal(err)
}
if firing {
t.Fatalf("expected alert %q not to fire", secondAlertName)
}
})
}
// testPromArbitraryFSAcc tests the
// github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.PrometheusSpec.ArbitraryFSAccessThroughSMs
// configuration with the service monitor bearer token and tls assets option.
func testPromArbitraryFSAcc(t *testing.T) {
t.Parallel()
name := "test"
tests := []struct {
name string
arbitraryFSAccessThroughSMsConfig monitoringv1.ArbitraryFSAccessThroughSMsConfig
endpoint monitoringv1.Endpoint
expectTargets bool
}{
//
// Bearer tokens:
//
{
name: "allowed-bearer-file",
arbitraryFSAccessThroughSMsConfig: monitoringv1.ArbitraryFSAccessThroughSMsConfig{
Deny: false,
},
endpoint: monitoringv1.Endpoint{
Port: "web",
BearerTokenFile: "/etc/ca-certificates/bearer-token",
},
expectTargets: true,
},
{
name: "denied-bearer-file",
arbitraryFSAccessThroughSMsConfig: monitoringv1.ArbitraryFSAccessThroughSMsConfig{
Deny: true,
},
endpoint: monitoringv1.Endpoint{
Port: "web",
BearerTokenFile: "/etc/ca-certificates/bearer-token",
},
expectTargets: false,
},
{
name: "denied-bearer-secret",
arbitraryFSAccessThroughSMsConfig: monitoringv1.ArbitraryFSAccessThroughSMsConfig{
Deny: true,
},
endpoint: monitoringv1.Endpoint{
Port: "web",
BearerTokenSecret: v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "bearer-token",
},
},
expectTargets: true,
},
//
// TLS assets:
//
{
name: "allowed-tls-file",
arbitraryFSAccessThroughSMsConfig: monitoringv1.ArbitraryFSAccessThroughSMsConfig{
Deny: false,
},
endpoint: monitoringv1.Endpoint{
Port: "web",
TLSConfig: &monitoringv1.TLSConfig{
CAFile: "/etc/ca-certificates/cert.pem",
CertFile: "/etc/ca-certificates/cert.pem",
KeyFile: "/etc/ca-certificates/key.pem",
},
},
expectTargets: true,
},
{
name: "denied-tls-file",
arbitraryFSAccessThroughSMsConfig: monitoringv1.ArbitraryFSAccessThroughSMsConfig{
Deny: true,
},
endpoint: monitoringv1.Endpoint{
Port: "web",
TLSConfig: &monitoringv1.TLSConfig{
CAFile: "/etc/ca-certificates/cert.pem",
CertFile: "/etc/ca-certificates/cert.pem",
KeyFile: "/etc/ca-certificates/key.pem",
},
},
expectTargets: false,
},
{
name: "denied-tls-secret",
arbitraryFSAccessThroughSMsConfig: monitoringv1.ArbitraryFSAccessThroughSMsConfig{
Deny: true,
},
endpoint: monitoringv1.Endpoint{
Port: "web",
TLSConfig: &monitoringv1.TLSConfig{
SafeTLSConfig: monitoringv1.SafeTLSConfig{
InsecureSkipVerify: true,
CA: monitoringv1.SecretOrConfigMap{
Secret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "cert.pem",
},
},
Cert: monitoringv1.SecretOrConfigMap{
Secret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "cert.pem",
},
},
KeySecret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "key.pem",
},
},
},
},
expectTargets: true,
},
{
name: "denied-tls-configmap",
arbitraryFSAccessThroughSMsConfig: monitoringv1.ArbitraryFSAccessThroughSMsConfig{
Deny: true,
},
endpoint: monitoringv1.Endpoint{
Port: "web",
TLSConfig: &monitoringv1.TLSConfig{
SafeTLSConfig: monitoringv1.SafeTLSConfig{
InsecureSkipVerify: true,
CA: monitoringv1.SecretOrConfigMap{
ConfigMap: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "cert.pem",
},
},
Cert: monitoringv1.SecretOrConfigMap{
ConfigMap: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "cert.pem",
},
},
KeySecret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "key.pem",
},
},
},
},
expectTargets: true,
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
// Create secret either used by bearer token secret key ref, tls
// asset key ref or tls configmap key ref.
cert, err := ioutil.ReadFile("../../test/instrumented-sample-app/certs/cert.pem")
if err != nil {
t.Fatalf("failed to load cert.pem: %v", err)
}
key, err := ioutil.ReadFile("../../test/instrumented-sample-app/certs/key.pem")
if err != nil {
t.Fatalf("failed to load key.pem: %v", err)
}
tlsCertsSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string][]byte{
"cert.pem": cert,
"key.pem": key,
"bearer-token": []byte("abc"),
},
}
if _, err := framework.KubeClient.CoreV1().Secrets(ns).Create(context.Background(), tlsCertsSecret, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
tlsCertsConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string]string{
"cert.pem": string(cert),
},
}
if _, err := framework.KubeClient.CoreV1().ConfigMaps(ns).Create(context.Background(), tlsCertsConfigMap, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
s := framework.MakeBasicServiceMonitor(name)
s.Spec.Endpoints[0] = test.endpoint
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatal("creating ServiceMonitor failed: ", err)
}
prometheusCRD := framework.MakeBasicPrometheus(ns, name, name, 1)
prometheusCRD.Namespace = ns
prometheusCRD.Spec.ArbitraryFSAccessThroughSMs = test.arbitraryFSAccessThroughSMsConfig
if strings.HasSuffix(test.name, "-file") {
mountTLSFiles(prometheusCRD, name)
}
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prometheusCRD); err != nil {
t.Fatal(err)
}
svc := framework.MakePrometheusService(prometheusCRD.Name, name, v1.ServiceTypeClusterIP)
if _, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(err)
}
if test.expectTargets {
if err := framework.WaitForActiveTargets(context.Background(), ns, svc.Name, 1); err != nil {
t.Fatal(err)
}
return
}
// Make sure Prometheus has enough time to reload.
time.Sleep(2 * time.Minute)
if err := framework.WaitForActiveTargets(context.Background(), ns, svc.Name, 0); err != nil {
t.Fatal(err)
}
})
}
}
// mountTLSFiles is a helper to manually mount TLS certificate files
// into the prometheus container
func mountTLSFiles(p *monitoringv1.Prometheus, secretName string) {
volumeName := secretName
p.Spec.Volumes = append(p.Spec.Volumes,
v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
})
p.Spec.Containers = []v1.Container{
{
Name: "prometheus",
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: "/etc/ca-certificates",
},
},
},
}
}
// testPromTLSConfigViaSecret tests the service monitor endpoint option to load
// certificate assets via Kubernetes secrets into the Prometheus container.
func testPromTLSConfigViaSecret(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
name := "test"
//
// Setup sample app.
//
cert, err := ioutil.ReadFile("../../test/instrumented-sample-app/certs/cert.pem")
if err != nil {
t.Fatalf("failed to load cert.pem: %v", err)
}
key, err := ioutil.ReadFile("../../test/instrumented-sample-app/certs/key.pem")
if err != nil {
t.Fatalf("failed to load key.pem: %v", err)
}
tlsCertsSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string][]byte{
"cert.pem": cert,
"key.pem": key,
},
}
if _, err := framework.KubeClient.CoreV1().Secrets(ns).Create(context.Background(), tlsCertsSecret, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
simple, err := testFramework.MakeDeployment("../../test/framework/resources/basic-auth-app-deployment.yaml")
if err != nil {
t.Fatal(err)
}
simple.Spec.Template.Spec.Containers[0].Args = []string{"--cert-path=/etc/certs"}
simple.Spec.Template.Spec.Volumes = []v1.Volume{
{
Name: "tls-certs",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: tlsCertsSecret.Name,
},
},
},
}
simple.Spec.Template.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: simple.Spec.Template.Spec.Volumes[0].Name,
MountPath: "/etc/certs",
},
}
if err := framework.CreateDeployment(context.Background(), ns, simple); err != nil {
t.Fatal("Creating simple basic auth app failed: ", err)
}
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"group": name,
},
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
Ports: []v1.ServicePort{
{
Name: "web",
Port: 8080,
},
{
Name: "mtls",
Port: 8081,
},
},
Selector: map[string]string{
"group": name,
},
},
}
if _, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(err)
}
//
// Setup monitoring.
//
sm := framework.MakeBasicServiceMonitor(name)
sm.Spec.Endpoints = []monitoringv1.Endpoint{
{
Port: "mtls",
Interval: "30s",
Scheme: "https",
TLSConfig: &monitoringv1.TLSConfig{
SafeTLSConfig: monitoringv1.SafeTLSConfig{
InsecureSkipVerify: true,
Cert: monitoringv1.SecretOrConfigMap{
Secret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: tlsCertsSecret.Name,
},
Key: "cert.pem",
},
},
KeySecret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: tlsCertsSecret.Name,
},
Key: "key.pem",
},
},
},
},
}
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), sm, metav1.CreateOptions{}); err != nil {
t.Fatal("creating ServiceMonitor failed: ", err)
}
prometheusCRD := framework.MakeBasicPrometheus(ns, name, name, 1)
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prometheusCRD); err != nil {
t.Fatal(err)
}
promSVC := framework.MakePrometheusService(prometheusCRD.Name, name, v1.ServiceTypeClusterIP)
if _, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, promSVC); err != nil {
t.Fatal(err)
}
//
// Check for proper scraping.
//
if err := framework.WaitForHealthyTargets(context.Background(), ns, promSVC.Name, 1); err != nil {
t.Fatal(err)
}
// TODO: Do a poll instead, should speed up things.
time.Sleep(30 * time.Second)
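	// A hypothetical shape for the poll suggested by the TODO above (sketch only, not wired in;
	// queryReportsUp is a made-up helper name):
	//
	//   err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
	//       // re-issue the query below and return true once it reports a "1" sample
	//       return queryReportsUp(), nil
	//   })
	//   if err != nil {
	//       t.Fatal(err)
	//   }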
response, err := framework.PrometheusSVCGetRequest(
context.Background(),
ns,
promSVC.Name,
"/api/v1/query",
map[string]string{"query": fmt.Sprintf(`up{job="%v",endpoint="%v"}`, name, sm.Spec.Endpoints[0].Port)},
)
if err != nil {
t.Fatal(err)
}
q := testFramework.PrometheusQueryAPIResponse{}
if err := json.NewDecoder(bytes.NewBuffer(response)).Decode(&q); err != nil {
t.Fatal(err)
}
if q.Status != "success" {
t.Fatalf("expected query status to be 'success' but got %v", q.Status)
}
if q.Data.Result[0].Value[1] != "1" {
t.Fatalf("expected query result to be '1' but got %v", q.Data.Result[0].Value[1])
}
}
func testPromStaticProbe(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
blackboxExporterName := "blackbox-exporter"
if err := framework.CreateBlackBoxExporterAndWaitUntilReady(context.Background(), ns, blackboxExporterName); err != nil {
t.Fatal("Creating blackbox exporter failed: ", err)
}
blackboxSvc := framework.MakeBlackBoxExporterService(ns, blackboxExporterName)
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, blackboxSvc); err != nil {
t.Fatal("creating blackbox exporter service failed ", err)
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
prometheusName := "test"
group := "probe-test"
svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
proberURL := blackboxExporterName + ":9115"
targets := []string{svc.Name + ":9090"}
probe := framework.MakeBasicStaticProbe(group, proberURL, targets)
if _, err := framework.MonClientV1.Probes(ns).Create(context.Background(), probe, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating Probe failed: ", err)
}
p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
p.Spec.ProbeSelector = &metav1.LabelSelector{
MatchLabels: map[string]string{
"group": group,
},
}
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p); err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(errors.Wrap(err, "creating prometheus service failed"))
} else {
testCtx.AddFinalizerFn(finalizerFn)
}
expectedURL := url.URL{Host: proberURL, Scheme: "http", Path: "/probe"}
q := expectedURL.Query()
q.Set("module", "http_2xx")
q.Set("target", targets[0])
expectedURL.RawQuery = q.Encode()
if err := wait.Poll(time.Second, time.Minute*5, func() (bool, error) {
activeTargets, err := framework.GetActiveTargets(context.Background(), ns, svc.Name)
if err != nil {
return false, err
}
if len(activeTargets) != 1 {
return false, nil
}
exp := expectedURL.String()
if activeTargets[0].ScrapeURL != exp {
return false, nil
}
if value, ok := activeTargets[0].Labels["instance"]; !ok || value != targets[0] {
return false, nil
}
return true, nil
}); err != nil {
t.Fatal("waiting for static probe targets timed out.")
}
}
func testPromSecurePodMonitor(t *testing.T) {
t.Parallel()
name := "test"
tests := []struct {
name string
endpoint monitoringv1.PodMetricsEndpoint
}{
//
// Basic auth:
//
{
name: "basic-auth-secret",
endpoint: monitoringv1.PodMetricsEndpoint{
Port: "web",
BasicAuth: &monitoringv1.BasicAuth{
Username: v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "user",
},
Password: v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "password",
},
},
},
},
//
// Bearer tokens:
//
{
name: "bearer-secret",
endpoint: monitoringv1.PodMetricsEndpoint{
Port: "web",
BearerTokenSecret: v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "bearer-token",
},
Path: "/bearer-metrics",
},
},
//
// TLS assets:
//
{
name: "tls-secret",
endpoint: monitoringv1.PodMetricsEndpoint{
Port: "mtls",
Scheme: "https",
TLSConfig: &monitoringv1.PodMetricsEndpointTLSConfig{
SafeTLSConfig: monitoringv1.SafeTLSConfig{
InsecureSkipVerify: true,
CA: monitoringv1.SecretOrConfigMap{
Secret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "cert.pem",
},
},
Cert: monitoringv1.SecretOrConfigMap{
Secret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "cert.pem",
},
},
KeySecret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "key.pem",
},
},
},
Path: "/",
},
},
{
name: "tls-configmap",
endpoint: monitoringv1.PodMetricsEndpoint{
Port: "mtls",
Scheme: "https",
TLSConfig: &monitoringv1.PodMetricsEndpointTLSConfig{
SafeTLSConfig: monitoringv1.SafeTLSConfig{
InsecureSkipVerify: true,
CA: monitoringv1.SecretOrConfigMap{
ConfigMap: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "cert.pem",
},
},
Cert: monitoringv1.SecretOrConfigMap{
ConfigMap: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "cert.pem",
},
},
KeySecret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "key.pem",
},
},
},
Path: "/",
},
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
// Create secret either used by bearer token secret key ref, tls
// asset key ref or tls configmap key ref.
cert, err := ioutil.ReadFile("../../test/instrumented-sample-app/certs/cert.pem")
if err != nil {
t.Fatalf("failed to load cert.pem: %v", err)
}
key, err := ioutil.ReadFile("../../test/instrumented-sample-app/certs/key.pem")
if err != nil {
t.Fatalf("failed to load key.pem: %v", err)
}
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string][]byte{
"user": []byte("user"),
"password": []byte("pass"),
"bearer-token": []byte("abc"),
"cert.pem": cert,
"key.pem": key,
},
}
if _, err := framework.KubeClient.CoreV1().Secrets(ns).Create(context.Background(), secret, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
tlsCertsConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string]string{
"cert.pem": string(cert),
},
}
if _, err := framework.KubeClient.CoreV1().ConfigMaps(ns).Create(context.Background(), tlsCertsConfigMap, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
prom := framework.MakeBasicPrometheus(ns, name, name, 1)
prom.Namespace = ns
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prom); err != nil {
t.Fatal(err)
}
simple, err := testFramework.MakeDeployment("../../test/framework/resources/basic-auth-app-deployment.yaml")
if err != nil {
t.Fatal(err)
}
simple.Spec.Template.Spec.Volumes = []v1.Volume{
{
Name: name,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
}
simple.Spec.Template.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: name,
MountPath: "/etc/ca-certificates",
},
}
if test.endpoint.Port == "mtls" {
simple.Spec.Template.Spec.Containers[0].Args = []string{"--cert-path=/etc/ca-certificates"}
}
if err := framework.CreateDeployment(context.Background(), ns, simple); err != nil {
t.Fatal("failed to create simple basic auth app: ", err)
}
pm := framework.MakeBasicPodMonitor(name)
pm.Spec.PodMetricsEndpoints[0] = test.endpoint
if _, err := framework.MonClientV1.PodMonitors(ns).Create(context.Background(), pm, metav1.CreateOptions{}); err != nil {
t.Fatal("failed to create PodMonitor: ", err)
}
if err := framework.WaitForHealthyTargets(context.Background(), ns, "prometheus-operated", 1); err != nil {
t.Fatal(err)
}
})
}
}
func testPromWebTLS(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
host := fmt.Sprintf("%s.%s.svc", "basic-prometheus", ns)
certBytes, keyBytes, err := certutil.GenerateSelfSignedCertKey(host, nil, nil)
if err != nil {
t.Fatal(err)
}
kubeClient := framework.KubeClient
if err := framework.CreateSecretWithCert(context.Background(), certBytes, keyBytes, ns, "web-tls"); err != nil {
t.Fatal(err)
}
prom := framework.MakeBasicPrometheus(ns, "basic-prometheus", "test-group", 1)
prom.Spec.Web = &monitoringv1.WebSpec{
TLSConfig: &monitoringv1.WebTLSConfig{
KeySecret: v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "web-tls",
},
Key: "tls.key",
},
Cert: monitoringv1.SecretOrConfigMap{
Secret: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "web-tls",
},
Key: "tls.crt",
},
},
},
}
if _, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prom); err != nil {
t.Fatal("Creating prometheus failed: ", err)
}
promPods, err := kubeClient.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
if len(promPods.Items) == 0 {
t.Fatalf("No prometheus pods found in namespace %s", ns)
}
cfg := framework.RestConfig
podName := promPods.Items[0].Name
if err := testFramework.StartPortForward(cfg, "https", podName, ns, "9090"); err != nil {
		t.Fatal(err)
}
// The prometheus certificate is issued to <pod>.<namespace>.svc,
// but port-forwarding is done through localhost.
// This is why we use an http client which skips the TLS verification.
// In the test we will verify the TLS certificate manually to make sure
// the prometheus instance is configured properly.
httpClient := http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
},
}
resp, err := httpClient.Get("https://localhost:9090")
if err != nil {
t.Fatal(err)
}
receivedCertBytes, err := certutil.EncodeCertificates(resp.TLS.PeerCertificates...)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(receivedCertBytes, certBytes) {
t.Fatal("Certificate received from prometheus instance does not match the one which is configured")
}
}
func testPromMinReadySeconds(t *testing.T) {
runFeatureGatedTests(t)
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, ns)
kubeClient := framework.KubeClient
var setMinReadySecondsInitial uint32 = 5
prom := framework.MakeBasicPrometheus(ns, "basic-prometheus", "test-group", 1)
prom.Spec.MinReadySeconds = &setMinReadySecondsInitial
prom, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, prom)
if err != nil {
t.Fatal("Creating prometheus failed: ", err)
}
promSS, err := kubeClient.AppsV1().StatefulSets(ns).Get(context.Background(), "prometheus-basic-prometheus", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if promSS.Spec.MinReadySeconds != int32(setMinReadySecondsInitial) {
t.Fatalf("expected MinReadySeconds to be %d but got %d", setMinReadySecondsInitial, promSS.Spec.MinReadySeconds)
}
var updated uint32 = 10
prom.Spec.MinReadySeconds = &updated
if _, err = framework.UpdatePrometheusAndWaitUntilReady(context.Background(), ns, prom); err != nil {
t.Fatal("Updating prometheus failed: ", err)
}
promSS, err = kubeClient.AppsV1().StatefulSets(ns).Get(context.Background(), "prometheus-basic-prometheus", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if promSS.Spec.MinReadySeconds != int32(updated) {
t.Fatalf("expected MinReadySeconds to be %d but got %d", updated, promSS.Spec.MinReadySeconds)
}
}
// testPromEnforcedNamespaceLabel checks that the enforcedNamespaceLabel field
// is honored even if a user tries to bypass the enforcement.
func testPromEnforcedNamespaceLabel(t *testing.T) {
t.Parallel()
for i, tc := range []struct {
relabelConfigs []*monitoringv1.RelabelConfig
metricRelabelConfigs []*monitoringv1.RelabelConfig
}{
{
// override label using the labeldrop action.
relabelConfigs: []*monitoringv1.RelabelConfig{
{
Regex: "namespace",
Action: "labeldrop",
},
},
metricRelabelConfigs: []*monitoringv1.RelabelConfig{
{
Regex: "namespace",
Action: "labeldrop",
},
},
},
{
// override label using the replace action.
relabelConfigs: []*monitoringv1.RelabelConfig{
{
TargetLabel: "namespace",
Replacement: "ns1",
},
},
metricRelabelConfigs: []*monitoringv1.RelabelConfig{
{
TargetLabel: "namespace",
Replacement: "ns1",
},
},
},
{
// override label using the labelmap action.
relabelConfigs: []*monitoringv1.RelabelConfig{
{
TargetLabel: "temp_namespace",
Replacement: "ns1",
},
},
metricRelabelConfigs: []*monitoringv1.RelabelConfig{
{
Action: "labelmap",
Regex: "temp_namespace",
Replacement: "namespace",
},
{
Action: "labeldrop",
Regex: "temp_namespace",
},
},
},
} {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup(t)
ns := framework.CreateNamespace(context.Background(), t, ctx)
framework.SetupPrometheusRBAC(context.Background(), t, ctx, ns)
prometheusName := "test"
group := "servicediscovery-test"
svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
s := framework.MakeBasicServiceMonitor(group)
s.Spec.Endpoints[0].RelabelConfigs = tc.relabelConfigs
s.Spec.Endpoints[0].MetricRelabelConfigs = tc.metricRelabelConfigs
if _, err := framework.MonClientV1.ServiceMonitors(ns).Create(context.Background(), s, metav1.CreateOptions{}); err != nil {
t.Fatal("Creating ServiceMonitor failed: ", err)
}
p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
p.Spec.EnforcedNamespaceLabel = "namespace"
_, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
}
if finalizerFn, err := framework.CreateServiceAndWaitUntilReady(context.Background(), ns, svc); err != nil {
t.Fatal(errors.Wrap(err, "creating prometheus service failed"))
} else {
ctx.AddFinalizerFn(finalizerFn)
}
_, err = framework.KubeClient.CoreV1().Secrets(ns).Get(context.Background(), fmt.Sprintf("prometheus-%s", prometheusName), metav1.GetOptions{})
if err != nil {
t.Fatal("Generated Secret could not be retrieved: ", err)
}
err = framework.WaitForDiscoveryWorking(context.Background(), ns, svc.Name, prometheusName)
if err != nil {
t.Fatal(errors.Wrap(err, "validating Prometheus target discovery failed"))
}
// Check that the namespace label is enforced to the correct value.
var (
loopErr error
namespaceLabel string
)
err = wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
loopErr = nil
res, err := framework.PrometheusQuery(ns, svc.Name, "prometheus_build_info")
if err != nil {
loopErr = errors.Wrap(err, "failed to query Prometheus")
return false, nil
}
if len(res) != 1 {
loopErr = fmt.Errorf("expecting 1 item but got %d", len(res))
return false, nil
}
for k, v := range res[0].Metric {
if k == "namespace" {
namespaceLabel = v
return true, nil
}
}
loopErr = fmt.Errorf("expecting to find 'namespace' label in %v", res[0].Metric)
return false, nil
})
if err != nil {
t.Fatalf("%v: %v", err, loopErr)
}
if namespaceLabel != ns {
t.Fatalf("expecting 'namespace' label value to be %q but got %q instead", ns, namespaceLabel)
}
})
}
}
func isAlertmanagerDiscoveryWorking(ctx context.Context, ns, promSVCName, alertmanagerName string) func() (bool, error) {
return func() (bool, error) {
pods, err := framework.KubeClient.CoreV1().Pods(ns).List(context.Background(), alertmanager.ListOptions(alertmanagerName))
if err != nil {
return false, err
}
if 3 != len(pods.Items) {
return false, nil
}
expectedAlertmanagerTargets := []string{}
for _, p := range pods.Items {
expectedAlertmanagerTargets = append(expectedAlertmanagerTargets, fmt.Sprintf("http://%s:9093/api/v2/alerts", p.Status.PodIP))
}
response, err := framework.PrometheusSVCGetRequest(context.Background(), ns, promSVCName, "/api/v1/alertmanagers", map[string]string{})
if err != nil {
return false, err
}
ra := prometheusAlertmanagerAPIResponse{}
if err := json.NewDecoder(bytes.NewBuffer(response)).Decode(&ra); err != nil {
return false, err
}
if assertExpectedAlertmanagerTargets(ra.Data.ActiveAlertmanagers, expectedAlertmanagerTargets) {
return true, nil
}
return false, nil
}
}
func assertExpectedAlertmanagerTargets(ams []*alertmanagerTarget, expectedTargets []string) bool {
log.Printf("Expected Alertmanager Targets: %#+v\n", expectedTargets)
existingTargets := []string{}
for _, am := range ams {
existingTargets = append(existingTargets, am.URL)
}
sort.Strings(expectedTargets)
sort.Strings(existingTargets)
if !reflect.DeepEqual(expectedTargets, existingTargets) {
log.Printf("Existing Alertmanager Targets: %#+v\n", existingTargets)
return false
}
return true
}
type alertmanagerTarget struct {
URL string `json:"url"`
}
type alertmanagerDiscovery struct {
ActiveAlertmanagers []*alertmanagerTarget `json:"activeAlertmanagers"`
}
type prometheusAlertmanagerAPIResponse struct {
Status string `json:"status"`
Data *alertmanagerDiscovery `json:"data"`
}
func gzipConfig(buf *bytes.Buffer, conf []byte) error {
w := gzip.NewWriter(buf)
defer w.Close()
if _, err := w.Write(conf); err != nil {
return err
}
return nil
}
| 1 | 17,033 | This test alone takes around 10-12 minutes to run. It has 20 subtests, each of which takes around one minute. We should see if there are redundant subtests that we can remove, or look for a way to optimize them. | prometheus-operator-prometheus-operator | go |
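To make the reviewer's suggestion concrete, here is a minimal, generic Go sketch (not taken from the change under review; expensiveSetup and checkScenario are hypothetical helper names) showing the two usual levers for cutting wall-clock time in a table-driven test like this: pay the expensive environment setup once, and let independent subtests run concurrently with t.Parallel() so the total time approaches the slowest case rather than the sum of all cases.

package example

import (
	"testing"
	"time"
)

// expensiveSetup stands in for the costly per-test work (namespace creation,
// deploying Prometheus, waiting for it to become ready, ...).
func expensiveSetup(t *testing.T) string {
	t.Helper()
	time.Sleep(10 * time.Millisecond) // placeholder for the slow part
	return "shared-environment"
}

// checkScenario stands in for the per-case assertions.
func checkScenario(t *testing.T, env, name string) {
	t.Helper()
	_ = env
	_ = name
}

func TestScenarios(t *testing.T) {
	// Pay the setup cost once and share the result across subtests.
	env := expensiveSetup(t)

	cases := []string{"basic-auth", "bearer-token", "tls-secret"}
	for _, name := range cases {
		name := name // capture the range variable for the parallel closure
		t.Run(name, func(t *testing.T) {
			t.Parallel() // independent cases run concurrently
			checkScenario(t, env, name)
		})
	}
}

Whether this helps in practice depends on the subtests actually being independent and on how much of their minute is spent in setup versus waiting on cluster state.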
@@ -10,11 +10,14 @@ import options from '../options';
* @param {boolean} isSvg Whether or not this node is an SVG node
*/
export function diffProps(dom, newProps, oldProps, isSvg) {
- for (let i in newProps) {
- if (i!=='children' && i!=='key' && (!oldProps || ((i==='value' || i==='checked') ? dom : oldProps)[i]!==newProps[i])) {
- setProperty(dom, i, newProps[i], oldProps[i], isSvg);
+ let keys = Object.keys(newProps).sort();
+ for (let i = 0; i < keys.length; i++) {
+ let k = keys[i];
+ if (k!=='children' && k!=='key' && (!oldProps || ((k==='value' || k==='checked') ? dom : oldProps)[k]!==newProps[k])) {
+ setProperty(dom, k, newProps[k], oldProps[k], isSvg);
}
}
+
for (let i in oldProps) {
if (i!=='children' && i!=='key' && (!newProps || !(i in newProps))) {
setProperty(dom, i, null, oldProps[i], isSvg); | 1 | import { IS_NON_DIMENSIONAL } from '../constants';
import options from '../options';
/**
* Diff the old and new properties of a VNode and apply changes to the DOM node
* @param {import('../internal').PreactElement} dom The DOM node to apply
* changes to
* @param {object} newProps The new props
* @param {object} oldProps The old props
* @param {boolean} isSvg Whether or not this node is an SVG node
*/
export function diffProps(dom, newProps, oldProps, isSvg) {
for (let i in newProps) {
if (i!=='children' && i!=='key' && (!oldProps || ((i==='value' || i==='checked') ? dom : oldProps)[i]!==newProps[i])) {
setProperty(dom, i, newProps[i], oldProps[i], isSvg);
}
}
for (let i in oldProps) {
if (i!=='children' && i!=='key' && (!newProps || !(i in newProps))) {
setProperty(dom, i, null, oldProps[i], isSvg);
}
}
}
let CAMEL_REG = /-?(?=[A-Z])/g;
/**
* Set a property value on a DOM node
* @param {import('../internal').PreactElement} dom The DOM node to modify
* @param {string} name The name of the property to set
* @param {*} value The value to set the property to
* @param {*} oldValue The old value the property had
* @param {boolean} isSvg Whether or not this DOM node is an SVG node or not
*/
function setProperty(dom, name, value, oldValue, isSvg) {
let v;
if (name==='class' || name==='className') name = isSvg ? 'class' : 'className';
if (name==='style') {
/* Possible golfing activities for setting styles:
* - we could just drop String style values. They're not supported in other VDOM libs.
* - assigning to .style sets .style.cssText - TODO: benchmark this, might not be worth the bytes.
* - assigning also casts to String, and ignores invalid values. This means assigning an Object clears all styles.
*/
let s = dom.style;
if (typeof value==='string') {
s.cssText = value;
}
else {
if (typeof oldValue==='string') s.cssText = '';
else {
// remove values not in the new list
for (let i in oldValue) {
if (value==null || !(i in value)) s.setProperty(i.replace(CAMEL_REG, '-'), '');
}
}
for (let i in value) {
v = value[i];
if (oldValue==null || v!==oldValue[i]) {
s.setProperty(i.replace(CAMEL_REG, '-'), typeof v==='number' && IS_NON_DIMENSIONAL.test(i)===false ? (v + 'px') : v);
}
}
}
}
else if (name==='dangerouslySetInnerHTML') {
return;
}
// Benchmark for comparison: https://esbench.com/bench/574c954bdb965b9a00965ac6
else if (name[0]==='o' && name[1]==='n') {
let useCapture = name !== (name=name.replace(/Capture$/, ''));
let nameLower = name.toLowerCase();
name = (nameLower in dom ? nameLower : name).substring(2);
if (value) {
if (!oldValue) dom.addEventListener(name, eventProxy, useCapture);
}
else {
dom.removeEventListener(name, eventProxy, useCapture);
}
(dom._listeners || (dom._listeners = {}))[name] = value;
}
else if (name!=='list' && name!=='tagName' && !isSvg && (name in dom)) {
dom[name] = value==null ? '' : value;
}
else if (value==null || value===false) {
if (name!==(name = name.replace(/^xlink:?/, ''))) dom.removeAttributeNS('http://www.w3.org/1999/xlink', name.toLowerCase());
else dom.removeAttribute(name);
}
else if (typeof value!=='function') {
if (name!==(name = name.replace(/^xlink:?/, ''))) dom.setAttributeNS('http://www.w3.org/1999/xlink', name.toLowerCase(), value);
else dom.setAttribute(name, value);
}
}
/**
* Proxy an event to hooked event handlers
* @param {Event} e The event object from the browser
* @private
*/
function eventProxy(e) {
return this._listeners[e.type](options.event ? options.event(e) : e);
}
| 1 | 12,721 | Good call, we need to keep that check :+1: | preactjs-preact | js |
@@ -11,5 +11,9 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http
bool ParseRequestLine(TRequestHandler handler, in ReadOnlySequence<byte> buffer, out SequencePosition consumed, out SequencePosition examined);
bool ParseHeaders(TRequestHandler handler, in ReadOnlySequence<byte> buffer, out SequencePosition consumed, out SequencePosition examined, out int consumedBytes);
+
+ bool ParseRequestLine(TRequestHandler handler, ref BufferReader<byte> reader);
+
+ bool ParseHeaders(TRequestHandler handler, ref BufferReader<byte> reader);
}
} | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Buffers;
namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http
{
public interface IHttpParser<TRequestHandler> where TRequestHandler : IHttpHeadersHandler, IHttpRequestLineHandler
{
bool ParseRequestLine(TRequestHandler handler, in ReadOnlySequence<byte> buffer, out SequencePosition consumed, out SequencePosition examined);
bool ParseHeaders(TRequestHandler handler, in ReadOnlySequence<byte> buffer, out SequencePosition consumed, out SequencePosition examined, out int consumedBytes);
}
}
| 1 | 17,067 | Should the interface have 2 `ParseHeaders` methods? | aspnet-KestrelHttpServer | .cs |
@@ -87,9 +87,11 @@ public class HiveTableOperations extends BaseMetastoreTableOperations {
private static final String HIVE_LOCK_CHECK_MIN_WAIT_MS = "iceberg.hive.lock-check-min-wait-ms";
private static final String HIVE_LOCK_CHECK_MAX_WAIT_MS = "iceberg.hive.lock-check-max-wait-ms";
private static final String HIVE_TABLE_LEVEL_LOCK_EVICT_MS = "iceberg.hive.table-level-lock-evict-ms";
+ private static final String HIVE_ICEBERG_METADATA_REFRESH_MAX_RETRIES = "hive.iceberg.metadata.refresh.max.retries";
private static final long HIVE_ACQUIRE_LOCK_TIMEOUT_MS_DEFAULT = 3 * 60 * 1000; // 3 minutes
private static final long HIVE_LOCK_CHECK_MIN_WAIT_MS_DEFAULT = 50; // 50 milliseconds
private static final long HIVE_LOCK_CHECK_MAX_WAIT_MS_DEFAULT = 5 * 1000; // 5 seconds
+ private static final int HIVE_ICEBERG_METADATA_REFRESH_MAX_RETRIES_DEFAULT = 2;
private static final long HIVE_TABLE_LEVEL_LOCK_EVICT_MS_DEFAULT = TimeUnit.MINUTES.toMillis(10);
private static final DynMethods.UnboundMethod ALTER_TABLE = DynMethods.builder("alter_table")
.impl(IMetaStoreClient.class, "alter_table_with_environmentContext", | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockLevel;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.iceberg.BaseMetastoreTableOperations;
import org.apache.iceberg.ClientPool;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.SnapshotSummary;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.common.DynMethods;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.CommitStateUnknownException;
import org.apache.iceberg.exceptions.NoSuchIcebergTableException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.hadoop.ConfigProperties;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
import org.apache.iceberg.relocated.com.google.common.collect.BiMap;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableBiMap;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.util.Tasks;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.iceberg.TableProperties.GC_ENABLED;
/**
* TODO we should be able to extract some more commonalities to BaseMetastoreTableOperations to
* avoid code duplication between this class and Metacat Tables.
*/
public class HiveTableOperations extends BaseMetastoreTableOperations {
private static final Logger LOG = LoggerFactory.getLogger(HiveTableOperations.class);
private static final String HIVE_ACQUIRE_LOCK_TIMEOUT_MS = "iceberg.hive.lock-timeout-ms";
private static final String HIVE_LOCK_CHECK_MIN_WAIT_MS = "iceberg.hive.lock-check-min-wait-ms";
private static final String HIVE_LOCK_CHECK_MAX_WAIT_MS = "iceberg.hive.lock-check-max-wait-ms";
private static final String HIVE_TABLE_LEVEL_LOCK_EVICT_MS = "iceberg.hive.table-level-lock-evict-ms";
private static final long HIVE_ACQUIRE_LOCK_TIMEOUT_MS_DEFAULT = 3 * 60 * 1000; // 3 minutes
private static final long HIVE_LOCK_CHECK_MIN_WAIT_MS_DEFAULT = 50; // 50 milliseconds
private static final long HIVE_LOCK_CHECK_MAX_WAIT_MS_DEFAULT = 5 * 1000; // 5 seconds
private static final long HIVE_TABLE_LEVEL_LOCK_EVICT_MS_DEFAULT = TimeUnit.MINUTES.toMillis(10);
private static final DynMethods.UnboundMethod ALTER_TABLE = DynMethods.builder("alter_table")
.impl(IMetaStoreClient.class, "alter_table_with_environmentContext",
String.class, String.class, Table.class, EnvironmentContext.class)
.impl(IMetaStoreClient.class, "alter_table",
String.class, String.class, Table.class, EnvironmentContext.class)
.impl(IMetaStoreClient.class, "alter_table",
String.class, String.class, Table.class)
.build();
private static final BiMap<String, String> ICEBERG_TO_HMS_TRANSLATION = ImmutableBiMap.of(
// gc.enabled in Iceberg and external.table.purge in Hive are meant to do the same things but with different names
GC_ENABLED, "external.table.purge"
);
private static Cache<String, ReentrantLock> commitLockCache;
private static synchronized void initTableLevelLockCache(long evictionTimeout) {
if (commitLockCache == null) {
commitLockCache = Caffeine.newBuilder()
.expireAfterAccess(evictionTimeout, TimeUnit.MILLISECONDS)
.build();
}
}
/**
* Provides key translation where necessary between Iceberg and HMS props. This translation is needed because some
* properties control the same behaviour but are named differently in Iceberg and Hive. Therefore changes to these
* property pairs should be synchronized.
*
* Example: Deleting data files upon DROP TABLE is enabled using gc.enabled=true in Iceberg and
* external.table.purge=true in Hive. Hive and Iceberg users are unaware of each other's control flags, therefore
* inconsistent behaviour can occur from e.g. a Hive user's point of view if external.table.purge=true is set on the
* HMS table but gc.enabled=false is set on the Iceberg table, resulting in no data file deletion.
*
* @param hmsProp The HMS property that should be translated to Iceberg property
* @return Iceberg property equivalent to the hmsProp. If no such translation exists, the original hmsProp is returned
*/
public static String translateToIcebergProp(String hmsProp) {
return ICEBERG_TO_HMS_TRANSLATION.inverse().getOrDefault(hmsProp, hmsProp);
}
private static class WaitingForLockException extends RuntimeException {
WaitingForLockException(String message) {
super(message);
}
}
private final String fullName;
private final String database;
private final String tableName;
private final Configuration conf;
private final long lockAcquireTimeout;
private final long lockCheckMinWaitTime;
private final long lockCheckMaxWaitTime;
private final FileIO fileIO;
private final ClientPool<IMetaStoreClient, TException> metaClients;
protected HiveTableOperations(Configuration conf, ClientPool metaClients, FileIO fileIO,
String catalogName, String database, String table) {
this.conf = conf;
this.metaClients = metaClients;
this.fileIO = fileIO;
this.fullName = catalogName + "." + database + "." + table;
this.database = database;
this.tableName = table;
this.lockAcquireTimeout =
conf.getLong(HIVE_ACQUIRE_LOCK_TIMEOUT_MS, HIVE_ACQUIRE_LOCK_TIMEOUT_MS_DEFAULT);
this.lockCheckMinWaitTime =
conf.getLong(HIVE_LOCK_CHECK_MIN_WAIT_MS, HIVE_LOCK_CHECK_MIN_WAIT_MS_DEFAULT);
this.lockCheckMaxWaitTime =
conf.getLong(HIVE_LOCK_CHECK_MAX_WAIT_MS, HIVE_LOCK_CHECK_MAX_WAIT_MS_DEFAULT);
long tableLevelLockCacheEvictionTimeout =
conf.getLong(HIVE_TABLE_LEVEL_LOCK_EVICT_MS, HIVE_TABLE_LEVEL_LOCK_EVICT_MS_DEFAULT);
initTableLevelLockCache(tableLevelLockCacheEvictionTimeout);
}
@Override
protected String tableName() {
return fullName;
}
@Override
public FileIO io() {
return fileIO;
}
@Override
protected void doRefresh() {
String metadataLocation = null;
try {
Table table = metaClients.run(client -> client.getTable(database, tableName));
validateTableIsIceberg(table, fullName);
metadataLocation = table.getParameters().get(METADATA_LOCATION_PROP);
} catch (NoSuchObjectException e) {
if (currentMetadataLocation() != null) {
throw new NoSuchTableException("No such table: %s.%s", database, tableName);
}
} catch (TException e) {
String errMsg = String.format("Failed to get table info from metastore %s.%s", database, tableName);
throw new RuntimeException(errMsg, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted during refresh", e);
}
refreshFromMetadataLocation(metadataLocation);
}
@SuppressWarnings("checkstyle:CyclomaticComplexity")
@Override
protected void doCommit(TableMetadata base, TableMetadata metadata) {
String newMetadataLocation = writeNewMetadata(metadata, currentVersion() + 1);
boolean hiveEngineEnabled = hiveEngineEnabled(metadata, conf);
CommitStatus commitStatus = CommitStatus.FAILURE;
boolean updateHiveTable = false;
Optional<Long> lockId = Optional.empty();
// getting a process-level lock per table to avoid concurrent commit attempts to the same table from the same
// JVM process, which would result in unnecessary and costly HMS lock acquisition requests
ReentrantLock tableLevelMutex = commitLockCache.get(fullName, t -> new ReentrantLock(true));
tableLevelMutex.lock();
try {
lockId = Optional.of(acquireLock());
// TODO add lock heart beating for cases where default lock timeout is too low.
Table tbl = loadHmsTable();
if (tbl != null) {
// If we try to create the table but the metadata location is already set, then we had a concurrent commit
if (base == null && tbl.getParameters().get(BaseMetastoreTableOperations.METADATA_LOCATION_PROP) != null) {
throw new AlreadyExistsException("Table already exists: %s.%s", database, tableName);
}
updateHiveTable = true;
LOG.debug("Committing existing table: {}", fullName);
} else {
tbl = newHmsTable();
LOG.debug("Committing new table: {}", fullName);
}
tbl.setSd(storageDescriptor(metadata, hiveEngineEnabled)); // set to pickup any schema changes
String metadataLocation = tbl.getParameters().get(METADATA_LOCATION_PROP);
String baseMetadataLocation = base != null ? base.metadataFileLocation() : null;
if (!Objects.equals(baseMetadataLocation, metadataLocation)) {
throw new CommitFailedException(
"Base metadata location '%s' is not same as the current table metadata location '%s' for %s.%s",
baseMetadataLocation, metadataLocation, database, tableName);
}
// get Iceberg props that have been removed
Set<String> removedProps = Collections.emptySet();
if (base != null) {
removedProps = base.properties().keySet().stream()
.filter(key -> !metadata.properties().containsKey(key))
.collect(Collectors.toSet());
}
Map<String, String> summary = Optional.ofNullable(metadata.currentSnapshot())
.map(Snapshot::summary)
.orElseGet(ImmutableMap::of);
setHmsTableParameters(newMetadataLocation, tbl, metadata.properties(), removedProps, hiveEngineEnabled, summary);
try {
persistTable(tbl, updateHiveTable);
commitStatus = CommitStatus.SUCCESS;
} catch (Throwable persistFailure) {
LOG.error("Cannot tell if commit to {}.{} succeeded, attempting to reconnect and check.",
database, tableName, persistFailure);
commitStatus = checkCommitStatus(newMetadataLocation, metadata);
switch (commitStatus) {
case SUCCESS:
break;
case FAILURE:
throw persistFailure;
case UNKNOWN:
throw new CommitStateUnknownException(persistFailure);
}
}
} catch (org.apache.hadoop.hive.metastore.api.AlreadyExistsException e) {
throw new AlreadyExistsException("Table already exists: %s.%s", database, tableName);
} catch (TException | UnknownHostException e) {
if (e.getMessage() != null && e.getMessage().contains("Table/View 'HIVE_LOCKS' does not exist")) {
throw new RuntimeException("Failed to acquire locks from metastore because 'HIVE_LOCKS' doesn't " +
"exist, this probably happened when using embedded metastore or doesn't create a " +
"transactional meta table. To fix this, use an alternative metastore", e);
}
throw new RuntimeException(String.format("Metastore operation failed for %s.%s", database, tableName), e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted during commit", e);
} finally {
cleanupMetadataAndUnlock(commitStatus, newMetadataLocation, lockId, tableLevelMutex);
}
}
@VisibleForTesting
void persistTable(Table hmsTable, boolean updateHiveTable) throws TException, InterruptedException {
if (updateHiveTable) {
metaClients.run(client -> {
EnvironmentContext envContext = new EnvironmentContext(
ImmutableMap.of(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE)
);
ALTER_TABLE.invoke(client, database, tableName, hmsTable, envContext);
return null;
});
} else {
metaClients.run(client -> {
client.createTable(hmsTable);
return null;
});
}
}
private Table loadHmsTable() throws TException, InterruptedException {
try {
return metaClients.run(client -> client.getTable(database, tableName));
} catch (NoSuchObjectException nte) {
LOG.trace("Table not found {}", fullName, nte);
return null;
}
}
private Table newHmsTable() {
final long currentTimeMillis = System.currentTimeMillis();
Table newTable = new Table(tableName,
database,
System.getProperty("user.name"),
        (int) (currentTimeMillis / 1000),
        (int) (currentTimeMillis / 1000),
Integer.MAX_VALUE,
null,
Collections.emptyList(),
new HashMap<>(),
null,
null,
TableType.EXTERNAL_TABLE.toString());
newTable.getParameters().put("EXTERNAL", "TRUE"); // using the external table type also requires this
return newTable;
}
private void setHmsTableParameters(String newMetadataLocation, Table tbl, Map<String, String> icebergTableProps,
Set<String> obsoleteProps, boolean hiveEngineEnabled,
Map<String, String> summary) {
Map<String, String> parameters = Optional.ofNullable(tbl.getParameters())
.orElseGet(HashMap::new);
// push all Iceberg table properties into HMS
icebergTableProps.forEach((key, value) -> {
// translate key names between Iceberg and HMS where needed
String hmsKey = ICEBERG_TO_HMS_TRANSLATION.getOrDefault(key, key);
parameters.put(hmsKey, value);
});
// remove any props from HMS that are no longer present in Iceberg table props
obsoleteProps.forEach(parameters::remove);
parameters.put(TABLE_TYPE_PROP, ICEBERG_TABLE_TYPE_VALUE.toUpperCase(Locale.ENGLISH));
parameters.put(METADATA_LOCATION_PROP, newMetadataLocation);
if (currentMetadataLocation() != null && !currentMetadataLocation().isEmpty()) {
parameters.put(PREVIOUS_METADATA_LOCATION_PROP, currentMetadataLocation());
}
// If needed set the 'storage_handler' property to enable query from Hive
if (hiveEngineEnabled) {
parameters.put(hive_metastoreConstants.META_TABLE_STORAGE,
"org.apache.iceberg.mr.hive.HiveIcebergStorageHandler");
} else {
parameters.remove(hive_metastoreConstants.META_TABLE_STORAGE);
}
// Set the basic statistics
if (summary.get(SnapshotSummary.TOTAL_DATA_FILES_PROP) != null) {
parameters.put(StatsSetupConst.NUM_FILES, summary.get(SnapshotSummary.TOTAL_DATA_FILES_PROP));
}
if (summary.get(SnapshotSummary.TOTAL_RECORDS_PROP) != null) {
parameters.put(StatsSetupConst.ROW_COUNT, summary.get(SnapshotSummary.TOTAL_RECORDS_PROP));
}
if (summary.get(SnapshotSummary.TOTAL_FILE_SIZE_PROP) != null) {
parameters.put(StatsSetupConst.TOTAL_SIZE, summary.get(SnapshotSummary.TOTAL_FILE_SIZE_PROP));
}
tbl.setParameters(parameters);
}
private StorageDescriptor storageDescriptor(TableMetadata metadata, boolean hiveEngineEnabled) {
final StorageDescriptor storageDescriptor = new StorageDescriptor();
storageDescriptor.setCols(HiveSchemaUtil.convert(metadata.schema()));
storageDescriptor.setLocation(metadata.location());
SerDeInfo serDeInfo = new SerDeInfo();
if (hiveEngineEnabled) {
storageDescriptor.setInputFormat("org.apache.iceberg.mr.hive.HiveIcebergInputFormat");
storageDescriptor.setOutputFormat("org.apache.iceberg.mr.hive.HiveIcebergOutputFormat");
serDeInfo.setSerializationLib("org.apache.iceberg.mr.hive.HiveIcebergSerDe");
} else {
storageDescriptor.setOutputFormat("org.apache.hadoop.mapred.FileOutputFormat");
storageDescriptor.setInputFormat("org.apache.hadoop.mapred.FileInputFormat");
serDeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
}
storageDescriptor.setSerdeInfo(serDeInfo);
return storageDescriptor;
}
@VisibleForTesting
long acquireLock() throws UnknownHostException, TException, InterruptedException {
final LockComponent lockComponent = new LockComponent(LockType.EXCLUSIVE, LockLevel.TABLE, database);
lockComponent.setTablename(tableName);
final LockRequest lockRequest = new LockRequest(Lists.newArrayList(lockComponent),
System.getProperty("user.name"),
InetAddress.getLocalHost().getHostName());
LockResponse lockResponse = metaClients.run(client -> client.lock(lockRequest));
AtomicReference<LockState> state = new AtomicReference<>(lockResponse.getState());
long lockId = lockResponse.getLockid();
final long start = System.currentTimeMillis();
long duration = 0;
boolean timeout = false;
try {
if (state.get().equals(LockState.WAITING)) {
// Retry count is the typical "upper bound of retries" for Tasks.run() function. In fact, the maximum number of
// attempts the Tasks.run() would try is `retries + 1`. Here, for checking locks, we use timeout as the
// upper bound of retries. So it is just reasonable to set a large retry count. However, if we set
// Integer.MAX_VALUE, the above logic of `retries + 1` would overflow into Integer.MIN_VALUE. Hence,
// the retry is set conservatively as `Integer.MAX_VALUE - 100` so it doesn't hit any boundary issues.
Tasks.foreach(lockId)
.retry(Integer.MAX_VALUE - 100)
.exponentialBackoff(
lockCheckMinWaitTime,
lockCheckMaxWaitTime,
lockAcquireTimeout,
1.5)
.throwFailureWhenFinished()
.onlyRetryOn(WaitingForLockException.class)
.run(id -> {
try {
LockResponse response = metaClients.run(client -> client.checkLock(id));
LockState newState = response.getState();
state.set(newState);
if (newState.equals(LockState.WAITING)) {
throw new WaitingForLockException("Waiting for lock.");
}
} catch (InterruptedException e) {
Thread.interrupted(); // Clear the interrupt status flag
LOG.warn("Interrupted while waiting for lock.", e);
}
}, TException.class);
}
} catch (WaitingForLockException waitingForLockException) {
timeout = true;
duration = System.currentTimeMillis() - start;
} finally {
if (!state.get().equals(LockState.ACQUIRED)) {
unlock(Optional.of(lockId));
}
}
// timeout and do not have lock acquired
if (timeout && !state.get().equals(LockState.ACQUIRED)) {
throw new CommitFailedException("Timed out after %s ms waiting for lock on %s.%s",
duration, database, tableName);
}
if (!state.get().equals(LockState.ACQUIRED)) {
throw new CommitFailedException("Could not acquire the lock on %s.%s, " +
"lock request ended in state %s", database, tableName, state);
}
return lockId;
}
private void cleanupMetadataAndUnlock(CommitStatus commitStatus, String metadataLocation, Optional<Long> lockId,
ReentrantLock tableLevelMutex) {
try {
if (commitStatus == CommitStatus.FAILURE) {
// If we are sure the commit failed, clean up the uncommitted metadata file
io().deleteFile(metadataLocation);
}
} catch (RuntimeException e) {
LOG.error("Fail to cleanup metadata file at {}", metadataLocation, e);
throw e;
} finally {
unlock(lockId);
tableLevelMutex.unlock();
}
}
private void unlock(Optional<Long> lockId) {
if (lockId.isPresent()) {
try {
doUnlock(lockId.get());
} catch (Exception e) {
LOG.warn("Failed to unlock {}.{}", database, tableName, e);
}
}
}
@VisibleForTesting
void doUnlock(long lockId) throws TException, InterruptedException {
metaClients.run(client -> {
client.unlock(lockId);
return null;
});
}
static void validateTableIsIceberg(Table table, String fullName) {
String tableType = table.getParameters().get(TABLE_TYPE_PROP);
NoSuchIcebergTableException.check(tableType != null && tableType.equalsIgnoreCase(ICEBERG_TABLE_TYPE_VALUE),
"Not an iceberg table: %s (type=%s)", fullName, tableType);
}
/**
* Returns if the hive engine related values should be enabled on the table, or not.
* <p>
* The decision is made like this:
* <ol>
* <li>Table property value {@link TableProperties#ENGINE_HIVE_ENABLED}
* <li>If the table property is not set then check the hive-site.xml property value
* {@link ConfigProperties#ENGINE_HIVE_ENABLED}
* <li>If none of the above is enabled then use the default value {@link TableProperties#ENGINE_HIVE_ENABLED_DEFAULT}
* </ol>
* @param metadata Table metadata to use
* @param conf The hive configuration to use
* @return if the hive engine related values should be enabled or not
*/
private static boolean hiveEngineEnabled(TableMetadata metadata, Configuration conf) {
if (metadata.properties().get(TableProperties.ENGINE_HIVE_ENABLED) != null) {
// We know that the property is set, so default value will not be used,
return metadata.propertyAsBoolean(TableProperties.ENGINE_HIVE_ENABLED, false);
}
return conf.getBoolean(ConfigProperties.ENGINE_HIVE_ENABLED, TableProperties.ENGINE_HIVE_ENABLED_DEFAULT);
}
}
| 1 | 44,597 | Nit: Does this make more sense as `iceberg.hive.metadata...`? The rest of the configs seem to start with `iceberg.hive`. | apache-iceberg | java |
@@ -184,6 +184,17 @@ func ValidateACMEIssuerDNS01Config(iss *v1alpha1.ACMEIssuerDNS01Config, fldPath
}
}
}
+ if p.DNSMadeEasy != nil {
+ if numProviders > 0 {
+ el = append(el, field.Forbidden(fldPath.Child("dnsmadeeasy"), "may not specify more than one provider type"))
+ } else {
+ numProviders++
+ el = append(el, ValidateSecretKeySelector(&p.DNSMadeEasy.SecretKey, fldPath.Child("dnsmadeeasy", "secretKeySecretRef"))...)
+ if len(p.DNSMadeEasy.APIKey) == 0 {
+ el = append(el, field.Required(fldPath.Child("dnsmadeeasy", "apiKey"), ""))
+ }
+ }
+ }
if p.Route53 != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("route53"), "may not specify more than one provider type")) | 1 | /*
Copyright 2018 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
)
// Validation functions for cert-manager v1alpha1 Issuer types
func ValidateIssuer(iss *v1alpha1.Issuer) field.ErrorList {
allErrs := ValidateIssuerSpec(&iss.Spec, field.NewPath("spec"))
return allErrs
}
func ValidateIssuerSpec(iss *v1alpha1.IssuerSpec, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
el = ValidateIssuerConfig(&iss.IssuerConfig, fldPath)
return el
}
func ValidateIssuerConfig(iss *v1alpha1.IssuerConfig, fldPath *field.Path) field.ErrorList {
numConfigs := 0
el := field.ErrorList{}
if iss.ACME != nil {
if numConfigs > 0 {
el = append(el, field.Forbidden(fldPath.Child("acme"), "may not specify more than one issuer type"))
} else {
numConfigs++
el = append(el, ValidateACMEIssuerConfig(iss.ACME, fldPath.Child("acme"))...)
}
}
if iss.CA != nil {
if numConfigs > 0 {
el = append(el, field.Forbidden(fldPath.Child("ca"), "may not specify more than one issuer type"))
} else {
numConfigs++
el = append(el, ValidateCAIssuerConfig(iss.CA, fldPath.Child("ca"))...)
}
}
if iss.SelfSigned != nil {
if numConfigs > 0 {
el = append(el, field.Forbidden(fldPath.Child("selfSigned"), "may not specify more than one issuer type"))
} else {
numConfigs++
el = append(el, ValidateSelfSignedIssuerConfig(iss.SelfSigned, fldPath.Child("selfSigned"))...)
}
}
if iss.Vault != nil {
if numConfigs > 0 {
el = append(el, field.Forbidden(fldPath.Child("vault"), "may not specify more than one issuer type"))
} else {
numConfigs++
el = append(el, ValidateVaultIssuerConfig(iss.Vault, fldPath.Child("vault"))...)
}
}
if numConfigs == 0 {
el = append(el, field.Required(fldPath, "at least one issuer must be configured"))
}
return el
}
func ValidateACMEIssuerConfig(iss *v1alpha1.ACMEIssuer, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if len(iss.Email) == 0 {
el = append(el, field.Required(fldPath.Child("email"), "email address is a required field"))
}
if len(iss.PrivateKey.Name) == 0 {
el = append(el, field.Required(fldPath.Child("privateKey", "name"), "private key secret name is a required field"))
}
if len(iss.Server) == 0 {
el = append(el, field.Required(fldPath.Child("server"), "acme server URL is a required field"))
}
if iss.HTTP01 != nil {
el = append(el, ValidateACMEIssuerHTTP01Config(iss.HTTP01, fldPath.Child("http01"))...)
}
if iss.DNS01 != nil {
el = append(el, ValidateACMEIssuerDNS01Config(iss.DNS01, fldPath.Child("dns01"))...)
}
return el
}
func ValidateCAIssuerConfig(iss *v1alpha1.CAIssuer, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if len(iss.SecretName) == 0 {
el = append(el, field.Required(fldPath.Child("secretName"), ""))
}
return el
}
func ValidateSelfSignedIssuerConfig(iss *v1alpha1.SelfSignedIssuer, fldPath *field.Path) field.ErrorList {
return nil
}
func ValidateVaultIssuerConfig(iss *v1alpha1.VaultIssuer, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if len(iss.Server) == 0 {
el = append(el, field.Required(fldPath.Child("server"), ""))
}
if len(iss.Path) == 0 {
el = append(el, field.Required(fldPath.Child("path"), ""))
}
return el
// TODO: add validation for Vault authentication types
}
func ValidateACMEIssuerHTTP01Config(iss *v1alpha1.ACMEIssuerHTTP01Config, fldPath *field.Path) field.ErrorList {
return nil
}
func ValidateACMEIssuerDNS01Config(iss *v1alpha1.ACMEIssuerDNS01Config, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
providersFldPath := fldPath.Child("providers")
for i, p := range iss.Providers {
fldPath := providersFldPath.Index(i)
if len(p.Name) == 0 {
el = append(el, field.Required(fldPath.Child("name"), "name must be specified"))
}
numProviders := 0
if p.Akamai != nil {
numProviders++
el = append(el, ValidateSecretKeySelector(&p.Akamai.AccessToken, fldPath.Child("akamai", "accessToken"))...)
el = append(el, ValidateSecretKeySelector(&p.Akamai.ClientSecret, fldPath.Child("akamai", "clientSecret"))...)
el = append(el, ValidateSecretKeySelector(&p.Akamai.ClientToken, fldPath.Child("akamai", "clientToken"))...)
if len(p.Akamai.ServiceConsumerDomain) == 0 {
el = append(el, field.Required(fldPath.Child("akamai", "serviceConsumerDomain"), ""))
}
}
if p.AzureDNS != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("azuredns"), "may not specify more than one provider type"))
} else {
numProviders++
el = append(el, ValidateSecretKeySelector(&p.AzureDNS.ClientSecret, fldPath.Child("azuredns", "clientSecretSecretRef"))...)
if len(p.AzureDNS.ClientID) == 0 {
el = append(el, field.Required(fldPath.Child("azuredns", "clientID"), ""))
}
if len(p.AzureDNS.SubscriptionID) == 0 {
el = append(el, field.Required(fldPath.Child("azuredns", "subscriptionID"), ""))
}
if len(p.AzureDNS.TenantID) == 0 {
el = append(el, field.Required(fldPath.Child("azuredns", "tenantID"), ""))
}
if len(p.AzureDNS.ResourceGroupName) == 0 {
el = append(el, field.Required(fldPath.Child("azuredns", "resourceGroupName"), ""))
}
}
}
if p.CloudDNS != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("clouddns"), "may not specify more than one provider type"))
} else {
numProviders++
el = append(el, ValidateSecretKeySelector(&p.CloudDNS.ServiceAccount, fldPath.Child("clouddns", "serviceAccountSecretRef"))...)
if len(p.CloudDNS.Project) == 0 {
el = append(el, field.Required(fldPath.Child("clouddns", "project"), ""))
}
}
}
if p.Cloudflare != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("cloudflare"), "may not specify more than one provider type"))
} else {
numProviders++
el = append(el, ValidateSecretKeySelector(&p.Cloudflare.APIKey, fldPath.Child("cloudflare", "apiKeySecretRef"))...)
if len(p.Cloudflare.Email) == 0 {
el = append(el, field.Required(fldPath.Child("cloudflare", "email"), ""))
}
}
}
if p.Route53 != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("route53"), "may not specify more than one provider type"))
} else {
numProviders++
// region is the only required field for route53 as ambient credentials can be used instead
if len(p.Route53.Region) == 0 {
el = append(el, field.Required(fldPath.Child("route53", "region"), ""))
}
}
}
if p.AcmeDNS != nil {
numProviders++
el = append(el, ValidateSecretKeySelector(&p.AcmeDNS.AccountSecret, fldPath.Child("acmedns", "accountSecretRef"))...)
if len(p.AcmeDNS.Host) == 0 {
el = append(el, field.Required(fldPath.Child("acmedns", "host"), ""))
}
}
if numProviders == 0 {
el = append(el, field.Required(fldPath, "at least one provider must be configured"))
}
}
return el
}
func ValidateSecretKeySelector(sks *v1alpha1.SecretKeySelector, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if sks.Name == "" {
el = append(el, field.Required(fldPath.Child("name"), "secret name is required"))
}
if sks.Key == "" {
el = append(el, field.Required(fldPath.Child("key"), "secret key is required"))
}
return el
}
| 1 | 13,680 | Is `baseURL` not required? | jetstack-cert-manager | go |
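The reviewer's question above is whether the DNSMadeEasy provider should also require a base URL, the way the diff already requires apiKey. Below is a minimal sketch of what that extra check could look like, written to slot into the validation file above and reusing its existing field import; note that the BaseURL field name on the DNSMadeEasy provider struct is an assumption for illustration and is not confirmed by the diff.

// Hedged sketch: require a base URL for the DNSMadeEasy provider, mirroring the apiKey
// check in the diff above. BaseURL is an assumed field name, not taken from the source.
func validateDNSMadeEasyBaseURL(baseURL string, fldPath *field.Path) field.ErrorList {
	el := field.ErrorList{}
	if len(baseURL) == 0 {
		el = append(el, field.Required(fldPath.Child("dnsmadeeasy", "baseURL"), ""))
	}
	return el
}

If the field exists and is meant to be mandatory, the call site inside ValidateACMEIssuerDNS01Config would simply append the result, e.g. el = append(el, validateDNSMadeEasyBaseURL(p.DNSMadeEasy.BaseURL, fldPath)...).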
@@ -101,6 +101,7 @@ class EmbeddingRPNHead(nn.Module):
"""Forward function in testing stage."""
return self._decode_init_proposals(img, img_metas)
+ # TODO: is right ?
def show_result(self, data):
"""Show the init proposals in EmbeddingRPN.
| 1 | import mmcv
import torch
import torch.nn as nn
from mmcv import tensor2imgs
from mmdet.models.builder import HEADS
from ...core import bbox_cxcywh_to_xyxy
@HEADS.register_module()
class EmbeddingRPNHead(nn.Module):
"""RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
Unlike traditional RPNHead, this module does not need FPN input, but just
decode `init_proposal_bboxes` and expand the first dimension of
`init_proposal_bboxes` and `init_proposal_features` to the batch_size.
Args:
num_proposals (int): Number of init_proposals. Default 100.
proposal_feature_channel (int): Channel number of
init_proposal_feature. Defaults to 256.
"""
def __init__(self,
num_proposals=100,
proposal_feature_channel=256,
**kwargs):
super(EmbeddingRPNHead, self).__init__()
self.num_proposals = num_proposals
self.proposal_feature_channel = proposal_feature_channel
self._init_layers()
def _init_layers(self):
"""Initialize a sparse set of proposal boxes and proposal features."""
self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
self.init_proposal_features = nn.Embedding(
self.num_proposals, self.proposal_feature_channel)
def init_weights(self):
"""Initialize the init_proposal_bboxes as normalized.
[c_x, c_y, w, h], and we initialize it to the size of the entire
image.
"""
nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
def _decode_init_proposals(self, imgs, img_metas):
"""Decode init_proposal_bboxes according to the size of images and
expand dimension of init_proposal_features to batch_size.
Args:
imgs (list[Tensor]): List of FPN features.
img_metas (list[dict]): List of meta-information of
images. Need the img_shape to decode the init_proposals.
Returns:
Tuple(Tensor):
- proposals (Tensor): Decoded proposal bboxes,
has shape (batch_size, num_proposals, 4).
- init_proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel).
- imgs_whwh (Tensor): Tensor with shape
(batch_size, 4), the dimension means
[img_width, img_height, img_width, img_height].
"""
proposals = self.init_proposal_bboxes.weight.clone()
proposals = bbox_cxcywh_to_xyxy(proposals)
num_imgs = len(imgs[0])
imgs_whwh = []
for meta in img_metas:
h, w, _ = meta['img_shape']
imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
imgs_whwh = torch.cat(imgs_whwh, dim=0)
imgs_whwh = imgs_whwh[:, None, :]
# imgs_whwh has shape (batch_size, 1, 4)
        # The shape of proposals changes from (num_proposals, 4)
        # to (batch_size, num_proposals, 4)
proposals = proposals * imgs_whwh
init_proposal_features = self.init_proposal_features.weight.clone()
init_proposal_features = init_proposal_features[None].expand(
num_imgs, *init_proposal_features.size())
return proposals, init_proposal_features, imgs_whwh
def forward_dummy(self, img, img_metas):
"""Dummy forward function.
Used in flops calculation.
"""
return self._decode_init_proposals(img, img_metas)
def forward_train(self, img, img_metas):
"""Forward function in training stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test_rpn(self, img, img_metas):
"""Forward function in testing stage."""
return self._decode_init_proposals(img, img_metas)
def show_result(self, data):
"""Show the init proposals in EmbeddingRPN.
Args:
data (dict): Dict contains image and
corresponding meta information.
"""
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
proposals, _ = self._decode_init_proposals(data['img'],
data['img_metas'])
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
mmcv.imshow_bboxes(img_show, proposals)
 | 1 | 22,732 | Just delete it; `embedding_rpn_head` is not a `Detector`, which is the class that should have `show_result`. | open-mmlab-mmdetection | py
@@ -7,6 +7,12 @@ namespace Datadog.Trace.ClrProfiler.ExtensionMethods
internal static string GetHttpMethod(this ISpan span)
=> span.GetTag(Tags.HttpMethod);
+ internal static string GetHost(this ISpan span)
+ => span.GetTag(Tags.HttpRequestHeadersHost);
+
+ internal static string GetAbsoluteUrl(this ISpan span)
+ => span.GetTag(Tags.HttpUrl);
+
internal static void DecorateWebServerSpan(
this Span span,
string resourceName, | 1 | using Datadog.Trace.Interfaces;
namespace Datadog.Trace.ClrProfiler.ExtensionMethods
{
internal static class SpanExtensions
{
internal static string GetHttpMethod(this ISpan span)
=> span.GetTag(Tags.HttpMethod);
internal static void DecorateWebServerSpan(
this Span span,
string resourceName,
string method,
string host,
string httpUrl)
{
span.Type = SpanTypes.Web;
span.ResourceName = resourceName?.Trim();
span.SetTag(Tags.SpanKind, SpanKinds.Server);
span.SetTag(Tags.HttpMethod, method);
span.SetTag(Tags.HttpRequestHeadersHost, host);
span.SetTag(Tags.HttpUrl, httpUrl);
}
}
}
| 1 | 15,204 | Where do we _get_ tag values? edit: I mean, where are these `GetFoo()` methods called from? It just seems weird to me that we are getting tag values from spans. I thought we only ever _set_ tags on spans. | DataDog-dd-trace-dotnet | .cs |
@@ -197,6 +197,7 @@ func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
if err != nil {
return nil, err
}
+ log.Printf("Reading kzip with %v encoding", pref)
return &Reader{
zip: archive,
root: root, | 1 | /*
* Copyright 2018 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package kzip implements the kzip compilation storage file format.
//
// The package exports two types of interest: A kzip.Reader can be used to read
// the contents of an existing kzip archive, and a kzip.Writer can be used to
// construct a new kzip archive.
//
// Reading an Archive:
//
// r, err := kzip.NewReader(file, size)
// ...
//
// // Look up a compilation record by its digest.
// unit, err := r.Lookup(unitDigest)
// ...
//
// // Scan all the compilation records stored.
// err := r.Scan(func(unit *kzip.Unit) error {
// if hasInterestingProperty(unit) {
// doStuffWith(unit)
// }
// return nil
// })
//
// // Open a reader for a stored file.
// rc, err := r.Open(fileDigest)
// ...
// defer rc.Close()
//
// // Read the complete contents of a stored file.
// bits, err := r.ReadAll(fileDigest)
// ...
//
// Writing an Archive:
//
// w, err := kzip.NewWriter(file)
// ...
//
// // Add a compilation record and (optional) index data.
// udigest, err := w.AddUnit(unit, nil)
// ...
//
// // Add file contents.
// fdigest, err := w.AddFile(file)
// ...
//
package kzip // import "kythe.io/kythe/go/platform/kzip"
import (
"archive/zip"
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"
"kythe.io/kythe/go/platform/kcd/kythe"
"bitbucket.org/creachadair/stringset"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"golang.org/x/sync/errgroup"
apb "kythe.io/kythe/proto/analysis_go_proto"
// These are common detail messages used by Kythe compilations, and
// required for JSON (un)marshaling to work.
_ "kythe.io/kythe/proto/buildinfo_go_proto"
_ "kythe.io/kythe/proto/cxx_go_proto"
_ "kythe.io/kythe/proto/filecontext_go_proto"
_ "kythe.io/kythe/proto/go_go_proto"
_ "kythe.io/kythe/proto/java_go_proto"
)
// Encoding describes how compilation units will be encoded when written to a kzip.
type Encoding int
const (
// EncodingJSON specifies to use JSON encoding
EncodingJSON Encoding = 1
// EncodingProto specifies to use Proto encoding
EncodingProto Encoding = 2
// EncodingAll specifies to encode using all known encodings
EncodingAll Encoding = EncodingJSON | EncodingProto
prefixJSON = "units"
prefixProto = "pbunits"
)
var (
// Use a constant file modification time in the kzip so file diffs only compare the contents,
// not when the kzips were created.
modifiedTime = time.Unix(0, 0)
)
// EncodingFor converts a string to an Encoding.
func EncodingFor(v string) (Encoding, error) {
v = strings.ToUpper(v)
switch {
case v == "ALL":
return EncodingAll, nil
case v == "JSON":
return EncodingJSON, nil
case v == "PROTO":
return EncodingProto, nil
default:
return EncodingJSON, fmt.Errorf("unknown encoding %s", v)
}
}
// String stringifies an Encoding
func (e Encoding) String() string {
switch {
case e == EncodingAll:
return "All"
case e == EncodingJSON:
return "JSON"
case e == EncodingProto:
return "Proto"
default:
return "Encoding" + strconv.FormatInt(int64(e), 10)
}
}
func defaultEncoding() Encoding {
if e := os.Getenv("KYTHE_KZIP_ENCODING"); e != "" {
enc, err := EncodingFor(e)
if err == nil {
return enc
}
log.Printf("Unknown kzip encoding: %s", e)
}
return EncodingJSON
}
// A Reader permits reading and scanning compilation records and file contents
// stored in a .kzip archive. The Lookup and Scan methods are mutually safe for
// concurrent use by multiple goroutines.
type Reader struct {
zip *zip.Reader
// The archives written by this library always use "root/" for the root
// directory, but it's not required by the spec. Use whatever name the
// archive actually specifies in the leading directory.
root string
// The prefix used for the compilation unit directory; one of
// prefixJSON or prefixProto
unitsPrefix string
}
// NewReader constructs a new Reader that consumes zip data from r, whose total
// size in bytes is given.
func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
archive, err := zip.NewReader(r, size)
if err != nil {
return nil, err
}
// Order the files in the archive by path, so we can binary search.
sort.Slice(archive.File, func(i, j int) bool {
return archive.File[i].Name < archive.File[j].Name
})
if len(archive.File) == 0 {
return nil, errors.New("archive is empty")
} else if fi := archive.File[0].FileInfo(); !fi.IsDir() {
return nil, errors.New("archive root is not a directory")
}
root := archive.File[0].Name
pref, err := unitPrefix(root, archive.File)
if err != nil {
return nil, err
}
return &Reader{
zip: archive,
root: root,
unitsPrefix: pref,
}, nil
}
func unitPrefix(root string, fs []*zip.File) (string, error) {
jsonDir := root + prefixJSON + "/"
protoDir := root + prefixProto + "/"
j := sort.Search(len(fs), func(i int) bool {
return fs[i].Name > jsonDir
})
hasJSON := j < len(fs) && strings.HasPrefix(fs[j].Name, jsonDir)
p := sort.Search(len(fs), func(i int) bool {
return fs[i].Name > protoDir
})
hasProto := p < len(fs) && strings.HasPrefix(fs[p].Name, protoDir)
if hasJSON && hasProto {
// validate that they have identical units based on hash
for p < len(fs) && j < len(fs) {
ispb := strings.HasPrefix(fs[p].Name, protoDir)
isjson := strings.HasPrefix(fs[j].Name, jsonDir)
if ispb != isjson {
return "", fmt.Errorf("both proto and JSON units found but are not identical")
}
if !ispb {
break
}
pdigest := strings.Split(fs[p].Name, "/")[2]
jdigest := strings.Split(fs[j].Name, "/")[2]
if pdigest != jdigest {
return "", fmt.Errorf("both proto and JSON units found but are not identical")
}
p++
j++
}
}
if hasProto {
return prefixProto, nil
}
return prefixJSON, nil
}
func (r *Reader) unitPath(digest string) string { return path.Join(r.root, r.unitsPrefix, digest) }
func (r *Reader) filePath(digest string) string { return path.Join(r.root, "files", digest) }
// ErrDigestNotFound is returned when a requested compilation unit or file
// digest is not found.
var ErrDigestNotFound = errors.New("digest not found")
// ErrUnitExists is returned by AddUnit when adding the same compilation
// multiple times.
var ErrUnitExists = errors.New("unit already exists")
func (r *Reader) readUnit(digest string, f *zip.File) (*Unit, error) {
rc, err := f.Open()
if err != nil {
return nil, err
}
defer rc.Close()
var msg apb.IndexedCompilation
if r.unitsPrefix == prefixProto {
rec := make([]byte, f.UncompressedSize64)
if _, err = io.ReadFull(rc, rec); err != nil {
return nil, err
}
if err := proto.Unmarshal(rec, &msg); err != nil {
return nil, fmt.Errorf("error unmarshaling for %s: %s", digest, err)
}
} else if err := jsonpb.Unmarshal(rc, &msg); err != nil {
return nil, err
}
return &Unit{
Digest: digest,
Proto: msg.Unit,
Index: msg.Index,
}, nil
}
// firstIndex returns the first index in the archive's file list whose
// path starts with prefix, or -1 if no such index exists.
func (r *Reader) firstIndex(prefix string) int {
fs := r.zip.File
n := sort.Search(len(fs), func(i int) bool {
return fs[i].Name >= prefix
})
if n >= len(fs) {
return -1
}
if !strings.HasPrefix(fs[n].Name, prefix) {
return -1
}
return n
}
// Lookup returns the specified compilation from the archive, if it exists. If
// the requested digest is not in the archive, ErrDigestNotFound is returned.
func (r *Reader) Lookup(unitDigest string) (*Unit, error) {
needle := r.unitPath(unitDigest)
pos := r.firstIndex(needle)
if pos >= 0 {
if f := r.zip.File[pos]; f.Name == needle {
return r.readUnit(unitDigest, f)
}
}
return nil, ErrDigestNotFound
}
// A ScanOption configures the behavior of scanning a kzip file.
type ScanOption interface{ isScanOption() }
type readConcurrency int
func (readConcurrency) isScanOption() {}
// ReadConcurrency returns a ScanOption that configures the max concurrency of
// reading compilation units within a kzip archive.
func ReadConcurrency(n int) ScanOption {
return readConcurrency(n)
}
func (r *Reader) canonicalUnits() (string, []*zip.File) {
prefix := r.unitPath("") + "/"
pos := r.firstIndex(prefix)
if pos < 0 {
return "", nil
}
var res []*zip.File
for _, file := range r.zip.File[pos:] {
if !strings.HasPrefix(file.Name, prefix) {
break
}
if file.Name == prefix {
continue // tolerate an empty units directory entry
}
res = append(res, file)
}
return prefix, res
}
// Scan scans all the compilations stored in the archive, and invokes f for
// each compilation record. If f reports an error, the scan is terminated and
// that error is propagated to the caller of Scan. At most 1 invocation of f
// will occur at any one time.
func (r *Reader) Scan(f func(*Unit) error, opts ...ScanOption) error {
concurrency := 1
for _, opt := range opts {
switch opt := opt.(type) {
case readConcurrency:
if n := int(opt); n > 0 {
concurrency = n
}
default:
return fmt.Errorf("unknown ScanOption type: %T", opt)
}
}
prefix, fileUnits := r.canonicalUnits()
if len(fileUnits) == 0 {
return nil
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
g, ctx := errgroup.WithContext(ctx)
files := make(chan *zip.File)
g.Go(func() error {
defer close(files)
for _, file := range fileUnits {
select {
case <-ctx.Done():
return nil
case files <- file:
}
}
return nil
})
units := make(chan *Unit)
var wg sync.WaitGroup
for i := 0; i < concurrency; i++ {
wg.Add(1)
g.Go(func() error {
defer wg.Done()
for file := range files {
digest := strings.TrimPrefix(file.Name, prefix)
unit, err := r.readUnit(digest, file)
if err != nil {
return err
}
select {
case <-ctx.Done():
return nil
case units <- unit:
}
}
return nil
})
}
go func() { wg.Wait(); close(units) }()
for unit := range units {
select {
case <-ctx.Done():
return g.Wait()
default:
if err := f(unit); err != nil {
return err
}
}
}
return g.Wait()
}
// Open opens a reader on the contents of the specified file digest. If the
// requested digest is not in the archive, ErrDigestNotFound is returned. The
// caller must close the reader when it is no longer needed.
func (r *Reader) Open(fileDigest string) (io.ReadCloser, error) {
needle := r.filePath(fileDigest)
if pos := r.firstIndex(needle); pos >= 0 {
if f := r.zip.File[pos]; f.Name == needle {
return f.Open()
}
}
return nil, ErrDigestNotFound
}
// ReadAll returns the complete contents of the file with the specified digest.
// It is a convenience wrapper for Open followed by ioutil.ReadAll.
func (r *Reader) ReadAll(fileDigest string) ([]byte, error) {
f, err := r.Open(fileDigest)
if err == nil {
defer f.Close()
return ioutil.ReadAll(f)
}
return nil, err
}
// A Unit represents a compilation record read from a kzip archive.
type Unit struct {
Digest string
Proto *apb.CompilationUnit
Index *apb.IndexedCompilation_Index
}
// A Writer permits construction of a .kzip archive.
type Writer struct {
mu sync.Mutex
zip *zip.Writer
fd stringset.Set // file digests already written
ud stringset.Set // unit digests already written
c io.Closer // a closer for the underlying writer (may be nil)
encoding Encoding // What encoding to use
}
// WriterOption describes options when creating a Writer
type WriterOption func(*Writer)
// WithEncoding sets the encoding to be used by a Writer
func WithEncoding(e Encoding) WriterOption {
return func(w *Writer) {
w.encoding = e
}
}
// NewWriter constructs a new empty Writer that delivers output to w. The
// AddUnit and AddFile methods are safe for use by concurrent goroutines.
func NewWriter(w io.Writer, options ...WriterOption) (*Writer, error) {
archive := zip.NewWriter(w)
// Create an entry for the root directory, which must be first.
root := &zip.FileHeader{
Name: "root/",
Comment: "kzip root directory",
Modified: modifiedTime,
}
root.SetMode(os.ModeDir | 0755)
if _, err := archive.CreateHeader(root); err != nil {
return nil, err
}
archive.SetComment("Kythe kzip archive")
kw := &Writer{
zip: archive,
fd: stringset.New(),
ud: stringset.New(),
encoding: defaultEncoding(),
}
for _, opt := range options {
opt(kw)
}
return kw, nil
}
// NewWriteCloser behaves as NewWriter, but arranges that when the *Writer is
// closed it also closes wc.
func NewWriteCloser(wc io.WriteCloser, options ...WriterOption) (*Writer, error) {
w, err := NewWriter(wc, options...)
if err == nil {
w.c = wc
}
return w, err
}
// toJSON defines the encoding format for compilation messages.
var toJSON = &jsonpb.Marshaler{OrigName: true}
// AddUnit adds a new compilation record to be added to the archive, returning
// the hex-encoded SHA256 digest of the unit's contents. It is legal for index
// to be nil, in which case no index terms will be added.
//
// If the same compilation is added multiple times, AddUnit returns the digest
// of the duplicated compilation along with ErrUnitExists to all callers after
// the first. The existing unit is not modified.
func (w *Writer) AddUnit(cu *apb.CompilationUnit, index *apb.IndexedCompilation_Index) (string, error) {
unit := kythe.Unit{Proto: cu}
unit.Canonicalize()
digest := unit.Digest()
w.mu.Lock()
defer w.mu.Unlock()
if w.ud.Contains(digest) {
return digest, ErrUnitExists
}
if w.encoding&EncodingJSON != 0 {
f, err := w.zip.CreateHeader(newFileHeader("root", prefixJSON, digest))
if err != nil {
return "", err
}
if err := toJSON.Marshal(f, &apb.IndexedCompilation{
Unit: unit.Proto,
Index: index,
}); err != nil {
return "", err
}
}
if w.encoding&EncodingProto != 0 {
f, err := w.zip.CreateHeader(newFileHeader("root", prefixProto, digest))
if err != nil {
return "", err
}
rec, err := proto.Marshal(&apb.IndexedCompilation{
Unit: unit.Proto,
Index: index,
})
if err != nil {
return "", err
}
_, err = f.Write(rec)
if err != nil {
return "", err
}
}
w.ud.Add(digest)
return digest, nil
}
// AddFile copies the complete contents of r into the archive as a new file
// entry, returning the hex-encoded SHA256 digest of the file's contents.
func (w *Writer) AddFile(r io.Reader) (string, error) {
// Buffer the file contents and compute their digest.
// We have to do this ahead of time, because we have to provide the name of
// the file before we can start writing its contents.
var buf bytes.Buffer
hash := sha256.New()
if _, err := io.Copy(io.MultiWriter(hash, &buf), r); err != nil {
return "", err
}
digest := hex.EncodeToString(hash.Sum(nil))
w.mu.Lock()
defer w.mu.Unlock()
if w.fd.Contains(digest) {
return digest, nil // already written
}
f, err := w.zip.CreateHeader(newFileHeader("root", "files", digest))
if err != nil {
return "", err
}
if _, err := io.Copy(f, &buf); err != nil {
return "", err
}
w.fd.Add(digest)
return digest, nil
}
// Close closes the writer, flushing any remaining unwritten data out to the
// underlying zip file. It is safe to close w arbitrarily many times; all calls
// after the first will report nil.
func (w *Writer) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.zip != nil {
err := w.zip.Close()
w.zip = nil
if w.c != nil {
if cerr := w.c.Close(); err == nil {
return cerr
}
}
return err
}
return nil
}
func newFileHeader(parts ...string) *zip.FileHeader {
fh := &zip.FileHeader{Name: path.Join(parts...), Method: zip.Deflate}
fh.SetMode(0600)
fh.Modified = modifiedTime
return fh
}
// Scan is a convenience function that creates a *Reader from f and invokes its
// Scan method with the given callback. Each invocation of scan is passed the
// reader associated with f, along with the current compilation unit.
func Scan(f File, scan func(*Reader, *Unit) error, opts ...ScanOption) error {
size, err := f.Seek(0, io.SeekEnd)
if err != nil {
return fmt.Errorf("getting file size: %v", err)
}
r, err := NewReader(f, size)
if err != nil {
return err
}
return r.Scan(func(unit *Unit) error {
return scan(r, unit)
}, opts...)
}
// A File represents the file capabilities needed to scan a kzip file.
type File interface {
io.ReaderAt
io.Seeker
}
| 1 | 11,880 | This seems like log spam. Maybe expose the encoding as a method and log at the point of interest? | kythe-kythe | go |
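The review above suggests dropping the log.Printf added to NewReader and instead letting callers ask the reader which encoding it detected, so that logging happens only where it is actually interesting. A minimal sketch of that shape follows, assuming it lives in the kzip package shown above and reuses its Encoding type and prefix constants; the Encoding() method name itself is an assumption, not part of the file as written.

// Encoding reports which compilation-unit encoding the archive stores in its canonical
// units directory, based on the prefix already detected by NewReader.
func (r *Reader) Encoding() Encoding {
	if r.unitsPrefix == prefixProto {
		return EncodingProto
	}
	return EncodingJSON
}

A caller that cares can then log at its own point of interest, for example log.Printf("Reading kzip with %v encoding", r.Encoding()), which keeps NewReader quiet and addresses the log-spam concern.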
@@ -464,7 +464,7 @@ PCO_PROTOCOL_TYPES = {
0x000c: 'P-CSCF IPv4 Address Request',
0x0010: 'IPv4 Link MTU Request',
0x8021: 'IPCP',
- 0xc023: 'Password Authentification Protocol',
+ 0xc023: 'Password Authentication Protocol',
0xc223: 'Challenge Handshake Authentication Protocol',
}
| 1 | #! /usr/bin/env python
# Copyright (C) 2017 Alessio Deiana <[email protected]>
# 2017 Alexis Sultan <[email protected]>
# This file is part of Scapy
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
# scapy.contrib.description = GTPv2
# scapy.contrib.status = loads
import logging
import struct
import time
from scapy.compat import orb
from scapy.fields import BitEnumField, BitField, ByteEnumField, ByteField, \
ConditionalField, IntField, IPField, LongField, PacketField, \
PacketListField, ShortEnumField, ShortField, StrFixedLenField, \
StrLenField, ThreeBytesField, XBitField, XIntField, XShortField
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IP6Field
from scapy.packet import bind_layers, Packet, Raw
from scapy.volatile import RandIP, RandShort
from scapy.contrib import gtp
RATType = {
6: "EUTRAN",
}
GTPmessageType = {1: "echo_request",
2: "echo_response",
32: "create_session_req",
33: "create_session_res",
34: "modify_bearer_req",
35: "modify_bearer_res",
36: "delete_session_req",
37: "delete_session_res",
70: "downlink_data_notif_failure_indic",
170: "realease_bearers_req",
171: "realease_bearers_res",
176: "downlink_data_notif",
177: "downlink_data_notif_ack",
}
IEType = {1: "IMSI",
2: "Cause",
3: "Recovery Restart",
71: "APN",
72: "AMBR",
73: "EPS Bearer ID",
74: "IPv4",
75: "MEI",
76: "MSISDN",
77: "Indication",
78: "Protocol Configuration Options",
79: "PAA",
80: "Bearer QoS",
82: "RAT",
83: "Serving Network",
86: "ULI",
87: "F-TEID",
93: "Bearer Context",
94: "Charging ID",
95: "Charging Characteristics",
99: "PDN Type",
114: "UE Time zone",
126: "Port Number",
127: "APN Restriction",
128: "Selection Mode",
161: "Max MBR/APN-AMBR (MMBR)"
}
CauseValues = {
16: "Request Accepted",
}
class GTPHeader(Packet):
# 3GPP TS 29.060 V9.1.0 (2009-12)
# without the version
name = "GTP v2 Header"
fields_desc = [BitField("version", 2, 3),
BitField("P", 1, 1),
BitField("T", 1, 1),
BitField("SPARE", 0, 1),
BitField("SPARE", 0, 1),
BitField("SPARE", 0, 1),
ByteEnumField("gtp_type", None, GTPmessageType),
ShortField("length", None),
ConditionalField(IntField("teid", 0),
lambda pkt:pkt.T == 1),
ThreeBytesField("seq", RandShort()),
ByteField("SPARE", 0)
]
def post_build(self, p, pay):
p += pay
if self.length is None:
l = len(p) - 8
p = p[:2] + struct.pack("!H", l) + p[4:]
return p
def hashret(self):
return struct.pack("B", self.version) + self.payload.hashret()
def answers(self, other):
return (isinstance(other, GTPHeader) and
self.version == other.version and
self.payload.answers(other.payload))
class IE_IPv4(gtp.IE_Base):
name = "IE IPv4"
fields_desc = [ByteEnumField("ietype", 74, IEType),
ShortField("length", 0),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
IPField("address", RandIP())]
class IE_MEI(gtp.IE_Base):
name = "IE MEI"
fields_desc = [ByteEnumField("ietype", 75, IEType),
ShortField("length", 0),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
LongField("MEI", 0)]
def IE_Dispatcher(s):
"""Choose the correct Information Element class."""
# Get the IE type
ietype = orb(s[0])
cls = ietypecls.get(ietype, Raw)
# if ietype greater than 128 are TLVs
if cls is Raw and ietype > 128:
cls = IE_NotImplementedTLV
return cls(s)
class IE_EPSBearerID(gtp.IE_Base):
name = "IE EPS Bearer ID"
fields_desc = [ByteEnumField("ietype", 73, IEType),
ShortField("length", 0),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
ByteField("EBI", 0)]
class IE_RAT(gtp.IE_Base):
name = "IE RAT"
fields_desc = [ByteEnumField("ietype", 82, IEType),
ShortField("length", 0),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
ByteEnumField("RAT_type", None, RATType)]
class IE_ServingNetwork(gtp.IE_Base):
name = "IE Serving Network"
fields_desc = [ByteEnumField("ietype", 83, IEType),
ShortField("length", 0),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
gtp.TBCDByteField("MCC", "", 2),
gtp.TBCDByteField("MNC", "", 1)]
class ULI_RAI(gtp.IE_Base):
name = "IE Tracking Area Identity"
fields_desc = [
gtp.TBCDByteField("MCC", "", 2),
# MNC: if the third digit of MCC is 0xf, then the length of
# MNC is 1 byte
gtp.TBCDByteField("MNC", "", 1),
ShortField("LAC", 0),
ShortField("RAC", 0)]
class ULI_SAI(gtp.IE_Base):
name = "IE Tracking Area Identity"
fields_desc = [
gtp.TBCDByteField("MCC", "", 2),
gtp.TBCDByteField("MNC", "", 1),
ShortField("LAC", 0),
ShortField("SAC", 0)]
class ULI_TAI(gtp.IE_Base):
name = "IE Tracking Area Identity"
fields_desc = [
gtp.TBCDByteField("MCC", "", 2),
gtp.TBCDByteField("MNC", "", 1),
ShortField("TAC", 0)]
class ULI_ECGI(gtp.IE_Base):
name = "IE E-UTRAN Cell Identifier"
fields_desc = [
gtp.TBCDByteField("MCC", "", 2),
gtp.TBCDByteField("MNC", "", 1),
BitField("SPARE", 0, 4),
BitField("ECI", 0, 28)]
class IE_ULI(gtp.IE_Base):
name = "IE ULI"
fields_desc = [ByteEnumField("ietype", 86, IEType),
ShortField("length", 0),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
BitField("SPARE", 0, 2),
BitField("LAI_Present", 0, 1),
BitField("ECGI_Present", 0, 1),
BitField("TAI_Present", 0, 1),
BitField("RAI_Present", 0, 1),
BitField("SAI_Present", 0, 1),
BitField("CGI_Present", 0, 1),
ConditionalField(
PacketField("SAI", 0, ULI_SAI), lambda pkt: bool(pkt.SAI_Present)),
ConditionalField(
PacketField("RAI", 0, ULI_RAI), lambda pkt: bool(pkt.RAI_Present)),
ConditionalField(
PacketField("TAI", 0, ULI_TAI), lambda pkt: bool(pkt.TAI_Present)),
ConditionalField(PacketField("ECGI", 0, ULI_ECGI),
lambda pkt: bool(pkt.ECGI_Present))]
class IE_FTEID(gtp.IE_Base):
name = "IE F-TEID"
fields_desc = [ByteEnumField("ietype", 87, IEType),
ShortField("length", 0),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
BitField("ipv4_present", 0, 1),
BitField("ipv6_present", 0, 1),
BitField("InterfaceType", 0, 6),
XIntField("GRE_Key", 0),
ConditionalField(
IPField("ipv4", RandIP()), lambda pkt: pkt.ipv4_present),
ConditionalField(XBitField("ipv6", "2001::", 128),
lambda pkt: pkt.ipv6_present)]
class IE_BearerContext(gtp.IE_Base):
name = "IE Bearer Context"
fields_desc = [ByteEnumField("ietype", 93, IEType),
ShortField("length", 0),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
PacketListField("IE_list", None, IE_Dispatcher,
length_from=lambda pkt: pkt.length)]
class IE_NotImplementedTLV(gtp.IE_Base):
name = "IE not implemented"
fields_desc = [ByteEnumField("ietype", 0, IEType),
ShortField("length", None),
StrLenField("data", "", length_from=lambda x: x.length)]
class IE_IMSI(gtp.IE_Base):
name = "IE IMSI"
fields_desc = [ByteEnumField("ietype", 1, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
gtp.TBCDByteField("IMSI", "33607080910",
length_from=lambda x: x.length)]
class IE_Cause(gtp.IE_Base):
name = "IE Cause"
fields_desc = [ByteEnumField("ietype", 2, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
ByteEnumField("Cause", 1, CauseValues),
BitField("SPARE", 0, 5),
BitField("PCE", 0, 1),
BitField("BCE", 0, 1),
BitField("CS", 0, 1)]
class IE_RecoveryRestart(gtp.IE_Base):
name = "IE Recovery Restart"
fields_desc = [ByteEnumField("ietype", 3, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
ByteField("restart_counter", 0)]
class IE_APN(gtp.IE_Base):
name = "IE APN"
fields_desc = [ByteEnumField("ietype", 71, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
gtp.APNStrLenField("APN", "internet",
length_from=lambda x: x.length)]
class IE_AMBR(gtp.IE_Base):
name = "IE AMBR"
fields_desc = [ByteEnumField("ietype", 72, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
IntField("AMBR_Uplink", 0),
IntField("AMBR_Downlink", 0)]
class IE_MSISDN(gtp.IE_Base):
name = "IE MSISDN"
fields_desc = [ByteEnumField("ietype", 76, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
gtp.TBCDByteField("digits", "33123456789",
length_from=lambda x: x.length)]
class IE_Indication(gtp.IE_Base):
name = "IE Cause"
fields_desc = [ByteEnumField("ietype", 77, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
BitField("DAF", 0, 1),
BitField("DTF", 0, 1),
BitField("HI", 0, 1),
BitField("DFI", 0, 1),
BitField("OI", 0, 1),
BitField("ISRSI", 0, 1),
BitField("ISRAI", 0, 1),
BitField("SGWCI", 0, 1),
BitField("SQCI", 0, 1),
BitField("UIMSI", 0, 1),
BitField("CFSI", 0, 1),
BitField("CRSI", 0, 1),
BitField("PS", 0, 1),
BitField("PT", 0, 1),
BitField("SI", 0, 1),
BitField("MSV", 0, 1),
ConditionalField(
BitField("RetLoc", 0, 1), lambda pkt: pkt.length > 2),
ConditionalField(
BitField("PBIC", 0, 1), lambda pkt: pkt.length > 2),
ConditionalField(
BitField("SRNI", 0, 1), lambda pkt: pkt.length > 2),
ConditionalField(
BitField("S6AF", 0, 1), lambda pkt: pkt.length > 2),
ConditionalField(
BitField("S4AF", 0, 1), lambda pkt: pkt.length > 2),
ConditionalField(
BitField("MBMDT", 0, 1), lambda pkt: pkt.length > 2),
ConditionalField(
BitField("ISRAU", 0, 1), lambda pkt: pkt.length > 2),
ConditionalField(
BitField("CCRSI", 0, 1), lambda pkt: pkt.length > 2),
ConditionalField(
BitField("CPRAI", 0, 1), lambda pkt: pkt.length > 3),
ConditionalField(
BitField("ARRL", 0, 1), lambda pkt: pkt.length > 3),
ConditionalField(
BitField("PPOFF", 0, 1), lambda pkt: pkt.length > 3),
ConditionalField(
BitField("PPON", 0, 1), lambda pkt: pkt.length > 3),
ConditionalField(
BitField("PPSI", 0, 1), lambda pkt: pkt.length > 3),
ConditionalField(
BitField("CSFBI", 0, 1), lambda pkt: pkt.length > 3),
ConditionalField(
BitField("CLII", 0, 1), lambda pkt: pkt.length > 3),
ConditionalField(
BitField("CPSR", 0, 1), lambda pkt: pkt.length > 3),
]
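# PDN type values shared by the PAA and PDN Type IEs.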
PDN_TYPES = {
1: "IPv4",
2: "IPv6",
3: "IPv4/IPv6",
}
PCO_OPTION_TYPES = {
3: "IPv4",
129: "Primary DNS Server IP address",
130: "Primary NBNS Server IP address",
131: "Secondary DNS Server IP address",
132: "Secondary NBNS Server IP address",
}
class PCO_Option(Packet):
def extract_padding(self, pkt):
return "", pkt
class PCO_IPv4(PCO_Option):
name = "IPv4"
fields_desc = [ByteEnumField("type", None, PCO_OPTION_TYPES),
ByteField("length", 0),
IPField("address", RandIP())]
class PCO_Primary_DNS(PCO_Option):
name = "Primary DNS Server IP Address"
fields_desc = [ByteEnumField("type", None, PCO_OPTION_TYPES),
ByteField("length", 0),
IPField("address", RandIP())]
class PCO_Primary_NBNS(PCO_Option):
name = "Primary DNS Server IP Address"
fields_desc = [ByteEnumField("type", None, PCO_OPTION_TYPES),
ByteField("length", 0),
IPField("address", RandIP())]
class PCO_Secondary_DNS(PCO_Option):
name = "Secondary DNS Server IP Address"
fields_desc = [ByteEnumField("type", None, PCO_OPTION_TYPES),
ByteField("length", 0),
IPField("address", RandIP())]
class PCO_Secondary_NBNS(PCO_Option):
name = "Secondary NBNS Server IP Address"
fields_desc = [ByteEnumField("type", None, PCO_OPTION_TYPES),
ByteField("length", 0),
IPField("address", RandIP())]
PCO_PROTOCOL_TYPES = {
0x0001: 'P-CSCF IPv6 Address Request',
0x0003: 'DNS Server IPv6 Address Request',
0x0005: 'MS Support of Network Requested Bearer Control indicator',
0x000a: 'IP Allocation via NAS',
0x000d: 'DNS Server IPv4 Address Request',
0x000c: 'P-CSCF IPv4 Address Request',
0x0010: 'IPv4 Link MTU Request',
0x8021: 'IPCP',
0xc023: 'Password Authentification Protocol',
0xc223: 'Challenge Handshake Authentication Protocol',
}
PCO_OPTION_CLASSES = {
3: PCO_IPv4,
129: PCO_Primary_DNS,
130: PCO_Primary_NBNS,
131: PCO_Secondary_DNS,
132: PCO_Secondary_NBNS,
}
def PCO_option_dispatcher(s):
"""Choose the correct PCO element."""
option = orb(s[0])
cls = PCO_OPTION_CLASSES.get(option, Raw)
return cls(s)
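# The PPP payload length excludes the 4-byte PPP header
# (code, identifier and 2-byte length).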
def len_options(pkt):
return pkt.length - 4 if pkt.length else 0
class PCO_P_CSCF_IPv6_Address_Request(PCO_Option):
name = "PCO PCO-P CSCF IPv6 Address Request"
fields_desc = [ShortEnumField("type", None, PCO_PROTOCOL_TYPES),
ByteField("length", 0),
ConditionalField(XBitField("address",
"2001:db8:0:42::", 128),
lambda pkt: pkt.length)]
class PCO_DNS_Server_IPv6(PCO_Option):
name = "PCO DNS Server IPv6 Address Request"
fields_desc = [ShortEnumField("type", None, PCO_PROTOCOL_TYPES),
ByteField("length", 0),
ConditionalField(XBitField("address",
"2001:db8:0:42::", 128),
lambda pkt: pkt.length)]
class PCO_SOF(PCO_Option):
name = "PCO MS Support of Network Requested Bearer Control indicator"
fields_desc = [ShortEnumField("type", None, PCO_PROTOCOL_TYPES),
ByteField("length", 0),
]
class PCO_PPP(PCO_Option):
name = "PPP IP Control Protocol"
fields_desc = [ByteField("Code", 0),
ByteField("Identifier", 0),
ShortField("length", 0),
PacketListField("Options", None, PCO_option_dispatcher,
length_from=len_options)]
def extract_padding(self, pkt):
return "", pkt
class PCO_IP_Allocation_via_NAS(PCO_Option):
name = "PCO IP Address allocation via NAS Signaling"
fields_desc = [ShortEnumField("type", None, PCO_PROTOCOL_TYPES),
ByteField("length", 0),
PacketListField("Options", None, PCO_option_dispatcher,
length_from=len_options)]
class PCO_P_CSCF_IPv4_Address_Request(PCO_Option):
name = "PCO PCO-P CSCF IPv4 Address Request"
fields_desc = [ShortEnumField("type", None, PCO_PROTOCOL_TYPES),
ByteField("length", 0),
ConditionalField(IPField("address", RandIP()),
lambda pkt: pkt.length)]
class PCO_DNS_Server_IPv4(PCO_Option):
name = "PCO DNS Server IPv4 Address Request"
fields_desc = [ShortEnumField("type", None, PCO_PROTOCOL_TYPES),
ByteField("length", 0),
ConditionalField(IPField("address", RandIP()),
lambda pkt: pkt.length)]
class PCO_IPv4_Link_MTU_Request(PCO_Option):
name = "PCO IPv4 Link MTU Request"
fields_desc = [ShortEnumField("type", None, PCO_PROTOCOL_TYPES),
ByteField("length", 0),
ConditionalField(ShortField("MTU_size", 1500),
lambda pkt: pkt.length)]
class PCO_IPCP(PCO_Option):
name = "PCO Internet Protocol Control Protocol"
fields_desc = [ShortEnumField("type", None, PCO_PROTOCOL_TYPES),
ByteField("length", 0),
PacketField("PPP", None, PCO_PPP)]
class PCO_PPP_Auth(PCO_Option):
name = "PPP Password Authentification Protocol"
fields_desc = [ByteField("Code", 0),
ByteField("Identifier", 0),
ShortField("length", 0),
ByteField("PeerID_length", 0),
ConditionalField(StrFixedLenField(
"PeerID",
"",
length_from=lambda pkt: pkt.PeerID_length),
lambda pkt: pkt.PeerID_length),
ByteField("Password_length", 0),
ConditionalField(
StrFixedLenField(
"Password",
"",
length_from=lambda pkt: pkt.Password_length),
lambda pkt: pkt.Password_length)]
class PCO_PasswordAuthentificationProtocol(PCO_Option):
name = "PCO Password Authentification Protocol"
fields_desc = [ShortEnumField("type", None, PCO_PROTOCOL_TYPES),
ByteField("length", 0),
PacketField("PPP", None, PCO_PPP_Auth)]
class PCO_PPP_Challenge(PCO_Option):
name = "PPP Password Authentification Protocol"
fields_desc = [ByteField("Code", 0),
ByteField("Identifier", 0),
ShortField("length", 0),
ByteField("value_size", 0),
ConditionalField(StrFixedLenField(
"value", "",
length_from=lambda pkt: pkt.value_size),
lambda pkt: pkt.value_size),
ConditionalField(StrFixedLenField(
"name", "",
length_from=lambda pkt: pkt.length - pkt.value_size - 5), # noqa: E501
lambda pkt: pkt.length)]
class PCO_ChallengeHandshakeAuthenticationProtocol(PCO_Option):
name = "PCO Password Authentification Protocol"
fields_desc = [ShortEnumField("type", None, PCO_PROTOCOL_TYPES),
ByteField("length", 0),
PacketField("PPP", None, PCO_PPP_Challenge)]
PCO_PROTOCOL_CLASSES = {
0x0001: PCO_P_CSCF_IPv6_Address_Request,
0x0003: PCO_DNS_Server_IPv6,
0x0005: PCO_SOF,
0x000a: PCO_IP_Allocation_via_NAS,
0x000c: PCO_P_CSCF_IPv4_Address_Request,
0x000d: PCO_DNS_Server_IPv4,
0x0010: PCO_IPv4_Link_MTU_Request,
0x8021: PCO_IPCP,
0xc023: PCO_PasswordAuthentificationProtocol,
0xc223: PCO_ChallengeHandshakeAuthenticationProtocol,
}
def PCO_protocol_dispatcher(s):
"""Choose the correct PCO element."""
proto_num = orb(s[0]) * 256 + orb(s[1])
cls = PCO_PROTOCOL_CLASSES.get(proto_num, Raw)
return cls(s)
class IE_PCO(gtp.IE_Base):
name = "IE Protocol Configuration Options"
fields_desc = [ByteEnumField("ietype", 78, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
BitField("Extension", 0, 1),
BitField("SPARE", 0, 4),
BitField("PPP", 0, 3),
PacketListField("Protocols", None, PCO_protocol_dispatcher,
length_from=lambda pkt: pkt.length - 1)]
class IE_PAA(gtp.IE_Base):
name = "IE PAA"
fields_desc = [ByteEnumField("ietype", 79, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
BitField("SPARE", 0, 5),
BitEnumField("PDN_type", None, 3, PDN_TYPES),
ConditionalField(
ByteField("ipv6_prefix_length", 8),
lambda pkt: pkt.PDN_type in (2, 3)),
ConditionalField(
XBitField("ipv6", "2001:db8:0:42::", 128),
lambda pkt: pkt.PDN_type in (2, 3)),
ConditionalField(
IPField("ipv4", 0), lambda pkt: pkt.PDN_type in (1, 3)),
]
class IE_Bearer_QoS(gtp.IE_Base):
name = "IE Bearer Quality of Service"
fields_desc = [ByteEnumField("ietype", 80, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
BitField("SPARE", 0, 1),
BitField("PCI", 0, 1),
BitField("PriorityLevel", 0, 4),
BitField("SPARE", 0, 1),
BitField("PVI", 0, 1),
ByteField("QCI", 0),
BitField("MaxBitRateForUplink", 0, 40),
BitField("MaxBitRateForDownlink", 0, 40),
BitField("GuaranteedBitRateForUplink", 0, 40),
BitField("GuaranteedBitRateForDownlink", 0, 40)]
class IE_ChargingID(gtp.IE_Base):
name = "IE Charging ID"
fields_desc = [ByteEnumField("ietype", 94, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
IntField("ChargingID", 0)]
class IE_ChargingCharacteristics(gtp.IE_Base):
name = "IE Charging ID"
fields_desc = [ByteEnumField("ietype", 95, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
XShortField("ChargingCharacteristric", 0)]
class IE_PDN_type(gtp.IE_Base):
name = "IE PDN Type"
fields_desc = [ByteEnumField("ietype", 99, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
BitField("SPARE", 0, 5),
BitEnumField("PDN_type", None, 3, PDN_TYPES)]
class IE_UE_Timezone(gtp.IE_Base):
name = "IE UE Time zone"
fields_desc = [ByteEnumField("ietype", 114, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
ByteField("Timezone", 0),
ByteField("DST", 0)]
class IE_Port_Number(gtp.IE_Base):
name = "IE Port Number"
fields_desc = [ByteEnumField("ietype", 126, IEType),
ShortField("length", 2),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
ShortField("PortNumber", RandShort())]
class IE_APN_Restriction(gtp.IE_Base):
name = "IE APN Restriction"
fields_desc = [ByteEnumField("ietype", 127, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
ByteField("APN_Restriction", 0)]
class IE_SelectionMode(gtp.IE_Base):
name = "IE Selection Mode"
fields_desc = [ByteEnumField("ietype", 128, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
BitField("SPARE", 0, 6),
BitField("SelectionMode", 0, 2)]
class IE_MMBR(gtp.IE_Base):
name = "IE Max MBR/APN-AMBR (MMBR)"
fields_desc = [ByteEnumField("ietype", 72, IEType),
ShortField("length", None),
BitField("CR_flag", 0, 4),
BitField("instance", 0, 4),
IntField("uplink_rate", 0),
IntField("downlink_rate", 0)]
ietypecls = {1: IE_IMSI,
2: IE_Cause,
3: IE_RecoveryRestart,
71: IE_APN,
72: IE_AMBR,
73: IE_EPSBearerID,
74: IE_IPv4,
75: IE_MEI,
76: IE_MSISDN,
77: IE_Indication,
78: IE_PCO,
79: IE_PAA,
80: IE_Bearer_QoS,
82: IE_RAT,
83: IE_ServingNetwork,
86: IE_ULI,
87: IE_FTEID,
93: IE_BearerContext,
94: IE_ChargingID,
95: IE_ChargingCharacteristics,
99: IE_PDN_type,
114: IE_UE_Timezone,
126: IE_Port_Number,
127: IE_APN_Restriction,
128: IE_SelectionMode,
161: IE_MMBR}
#
# GTPv2 Commands
# 3GPP TS 29.274
#
class GTPV2Command(Packet):
fields_desc = [PacketListField("IE_list", None, IE_Dispatcher)]
class GTPV2EchoRequest(GTPV2Command):
name = "GTPv2 Echo Request"
class GTPV2EchoResponse(GTPV2Command):
name = "GTPv2 Echo Response"
class GTPV2CreateSessionRequest(GTPV2Command):
name = "GTPv2 Create Session Request"
class GTPV2CreateSessionResponse(GTPV2Command):
name = "GTPv2 Create Session Response"
class GTPV2DeleteSessionRequest(GTPV2Command):
name = "GTPv2 Delete Session Request"
class GTPV2DeleteSessionResponse(GTPV2Command):
name = "GTPv2 Delete Session Request"
class GTPV2ModifyBearerCommand(GTPV2Command):
name = "GTPv2 Modify Bearer Command"
class GTPV2ModifyBearerFailureNotification(GTPV2Command):
name = "GTPv2 Modify Bearer Command"
class GTPV2DownlinkDataNotifFailureIndication(GTPV2Command):
name = "GTPv2 Downlink Data Notification Failure Indication"
class GTPV2ModifyBearerRequest(GTPV2Command):
name = "GTPv2 Modify Bearer Request"
class GTPV2ModifyBearerResponse(GTPV2Command):
name = "GTPv2 Modify Bearer Response"
class GTPV2UpdateBearerRequest(GTPV2Command):
name = "GTPv2 Update Bearer Request"
class GTPV2UpdateBearerResponse(GTPV2Command):
name = "GTPv2 Update Bearer Response"
class GTPV2DeleteBearerRequest(GTPV2Command):
name = "GTPv2 Delete Bearer Request"
class GTPV2SuspendNotification(GTPV2Command):
name = "GTPv2 Suspend Notification"
class GTPV2SuspendAcknowledge(GTPV2Command):
name = "GTPv2 Suspend Acknowledge"
class GTPV2ResumeNotification(GTPV2Command):
name = "GTPv2 Resume Notification"
class GTPV2ResumeAcknowledge(GTPV2Command):
name = "GTPv2 Resume Acknowledge"
class GTPV2DeleteBearerResponse(GTPV2Command):
name = "GTPv2 Delete Bearer Response"
class GTPV2CreateIndirectDataForwardingTunnelRequest(GTPV2Command):
name = "GTPv2 Create Indirect Data Forwarding Tunnel Request"
class GTPV2CreateIndirectDataForwardingTunnelResponse(GTPV2Command):
name = "GTPv2 Create Indirect Data Forwarding Tunnel Response"
class GTPV2DeleteIndirectDataForwardingTunnelRequest(GTPV2Command):
name = "GTPv2 Delete Indirect Data Forwarding Tunnel Request"
class GTPV2DeleteIndirectDataForwardingTunnelResponse(GTPV2Command):
name = "GTPv2 Delete Indirect Data Forwarding Tunnel Response"
class GTPV2ReleaseBearerRequest(GTPV2Command):
name = "GTPv2 Release Bearer Request"
class GTPV2ReleaseBearerResponse(GTPV2Command):
name = "GTPv2 Release Bearer Response"
class GTPV2DownlinkDataNotif(GTPV2Command):
name = "GTPv2 Download Data Notification"
class GTPV2DownlinkDataNotifAck(GTPV2Command):
name = "GTPv2 Download Data Notification Acknowledgment"
bind_layers(GTPHeader, GTPV2EchoRequest, gtp_type=1, T=0)
bind_layers(GTPHeader, GTPV2EchoResponse, gtp_type=2, T=0)
bind_layers(GTPHeader, GTPV2CreateSessionRequest, gtp_type=32)
bind_layers(GTPHeader, GTPV2CreateSessionResponse, gtp_type=33)
bind_layers(GTPHeader, GTPV2ModifyBearerRequest, gtp_type=34)
bind_layers(GTPHeader, GTPV2ModifyBearerResponse, gtp_type=35)
bind_layers(GTPHeader, GTPV2DeleteSessionRequest, gtp_type=36)
bind_layers(GTPHeader, GTPV2DeleteSessionResponse, gtp_type=37)
bind_layers(GTPHeader, GTPV2ModifyBearerCommand, gtp_type=64)
bind_layers(GTPHeader, GTPV2ModifyBearerFailureNotification, gtp_type=65)
bind_layers(GTPHeader, GTPV2DownlinkDataNotifFailureIndication, gtp_type=70)
bind_layers(GTPHeader, GTPV2UpdateBearerRequest, gtp_type=97)
bind_layers(GTPHeader, GTPV2UpdateBearerResponse, gtp_type=98)
bind_layers(GTPHeader, GTPV2DeleteBearerRequest, gtp_type=99)
bind_layers(GTPHeader, GTPV2DeleteBearerResponse, gtp_type=100)
bind_layers(GTPHeader, GTPV2SuspendNotification, gtp_type=162)
bind_layers(GTPHeader, GTPV2SuspendAcknowledge, gtp_type=163)
bind_layers(GTPHeader, GTPV2ResumeNotification, gtp_type=164)
bind_layers(GTPHeader, GTPV2ResumeAcknowledge, gtp_type=165)
bind_layers(
GTPHeader, GTPV2CreateIndirectDataForwardingTunnelRequest, gtp_type=166)
bind_layers(
GTPHeader, GTPV2CreateIndirectDataForwardingTunnelResponse, gtp_type=167)
bind_layers(
GTPHeader, GTPV2DeleteIndirectDataForwardingTunnelRequest, gtp_type=168)
bind_layers(
GTPHeader, GTPV2DeleteIndirectDataForwardingTunnelResponse, gtp_type=169)
bind_layers(GTPHeader, GTPV2ReleaseBearerRequest, gtp_type=170)
bind_layers(GTPHeader, GTPV2ReleaseBearerResponse, gtp_type=171)
bind_layers(GTPHeader, GTPV2DownlinkDataNotif, gtp_type=176)
bind_layers(GTPHeader, GTPV2DownlinkDataNotifAck, gtp_type=177)
| 1 | 13,620 | I looked up that one, and it seems correct: Authentification is French, Authentication seems to be correct | secdev-scapy | py |
@@ -22,9 +22,13 @@
"""
Plot class used in monitor mixin framework.
"""
-import matplotlib.pyplot as plt
-import matplotlib.cm as colorModel
+import traceback
+try:
+ # We import in here to avoid creating a matplotlib dependency in nupic.
+ import matplotlib.pyplot as plt
+except ImportError:
+ print traceback.format_exc() + "\n"
class Plot(object): | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot class used in monitor mixin framework.
"""
import matplotlib.pyplot as plt
import matplotlib.cm as colorModel
class Plot(object):
def __init__(self, monitor, title):
"""
@param monitor (MonitorMixinBase) Monitor Mixin instance that generated
this plot
@param title (string) Plot title
"""
self._monitor = monitor
self._title = title
self._fig = self._initFigure()
plt.ion()
plt.show()
def _initFigure(self):
fig = plt.figure()
fig.suptitle(self._prettyPrintTitle())
return fig
def _prettyPrintTitle(self):
if self._monitor.mmName is not None:
return "[{0}] {1}".format(self._monitor.mmName, self._title)
return self._title
def addGraph(self, data, position=111, xlabel=None, ylabel=None):
""" Adds a graph to the plot's figure.
@param data See matplotlib.Axes.plot documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.plot(data)
plt.draw()
def addHistogram(self, data, position=111, xlabel=None, ylabel=None,
bins=None):
""" Adds a histogram to the plot's figure.
@param data See matplotlib.Axes.hist documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.hist(data, bins=bins, color="green", alpha=0.8)
plt.draw()
def add2DArray(self, data, position=111, xlabel=None, ylabel=None,
cmap=colorModel.Greys, aspect="auto", interpolation="nearest"):
""" Adds an image to the plot's figure.
@param data a 2D array. See matplotlib.Axes.imshow documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@param cmap color map used in the rendering
@param aspect how aspect ratio is handled during resize
@param interpolation interpolation method
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)
plt.draw()
def _addBase(self, position, xlabel=None, ylabel=None):
""" Adds a subplot to the plot's figure at specified position.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@returns (matplotlib.Axes) Axes instance
"""
ax = self._fig.add_subplot(position)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
| 1 | 17,306 | Can we print a more human-friendly warning here, saying that plotting won't work or something along those lines? | numenta-nupic | py |
@@ -1,13 +1,15 @@
import html
import time
from typing import Optional, Tuple
-from mitmproxy import connections
+
from mitmproxy import flow
from mitmproxy import version
from mitmproxy.net import http
+from mitmproxy.proxy import context
HTTPRequest = http.Request
HTTPResponse = http.Response
+HTTPMessage = http.Message
class HTTPFlow(flow.Flow): | 1 | import html
import time
from typing import Optional, Tuple
from mitmproxy import connections
from mitmproxy import flow
from mitmproxy import version
from mitmproxy.net import http
HTTPRequest = http.Request
HTTPResponse = http.Response
class HTTPFlow(flow.Flow):
"""
An HTTPFlow is a collection of objects representing a single HTTP
transaction.
"""
request: HTTPRequest
response: Optional[HTTPResponse] = None
error: Optional[flow.Error] = None
"""
Note that it's possible for a Flow to have both a response and an error
object. This might happen, for instance, when a response was received
from the server, but there was an error sending it back to the client.
"""
server_conn: connections.ServerConnection
client_conn: connections.ClientConnection
intercepted: bool = False
""" Is this flow currently being intercepted? """
mode: str
""" What mode was the proxy layer in when receiving this request? """
def __init__(self, client_conn, server_conn, live=None, mode="regular"):
super().__init__("http", client_conn, server_conn, live)
self.mode = mode
_stateobject_attributes = flow.Flow._stateobject_attributes.copy()
# mypy doesn't support update with kwargs
_stateobject_attributes.update(dict(
request=HTTPRequest,
response=HTTPResponse,
mode=str
))
def __repr__(self):
s = "<HTTPFlow"
for a in ("request", "response", "error", "client_conn", "server_conn"):
if getattr(self, a, False):
s += "\r\n %s = {flow.%s}" % (a, a)
s += ">"
return s.format(flow=self)
@property
def timestamp_start(self) -> float:
return self.request.timestamp_start
def copy(self):
f = super().copy()
if self.request:
f.request = self.request.copy()
if self.response:
f.response = self.response.copy()
return f
def make_error_response(
status_code: int,
message: str = "",
headers: Optional[http.Headers] = None,
) -> HTTPResponse:
body: bytes = """
<html>
<head>
<title>{status_code} {reason}</title>
</head>
<body>
<h1>{status_code} {reason}</h1>
<p>{message}</p>
</body>
</html>
""".strip().format(
status_code=status_code,
reason=http.status_codes.RESPONSES.get(status_code, "Unknown"),
message=html.escape(message),
).encode("utf8", "replace")
if not headers:
headers = http.Headers(
Server=version.MITMPROXY,
Connection="close",
Content_Length=str(len(body)),
Content_Type="text/html"
)
return HTTPResponse.make(status_code, body, headers)
def make_connect_request(address: Tuple[str, int]) -> HTTPRequest:
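    # Synthesize a minimal CONNECT request for the given (host, port); the
    # authority is set to "host:port" and scheme/path are left empty.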
return HTTPRequest(
host=address[0],
port=address[1],
method=b"CONNECT",
scheme=b"",
authority=f"{address[0]}:{address[1]}".encode(),
path=b"",
http_version=b"HTTP/1.1",
headers=http.Headers(),
content=b"",
trailers=None,
timestamp_start=time.time(),
timestamp_end=time.time(),
)
def make_connect_response(http_version):
# Do not send any response headers as it breaks proxying non-80 ports on
# Android emulators using the -http-proxy option.
return HTTPResponse(
http_version,
200,
b"Connection established",
http.Headers(),
b"",
None,
time.time(),
time.time(),
)
def make_expect_continue_response():
return HTTPResponse.make(100)
| 1 | 15,251 | @mhils didn't we try to get rid of these (now empty) wrapper classes? | mitmproxy-mitmproxy | py |
@@ -19,6 +19,7 @@ import (
log "github.com/sirupsen/logrus"
+ "github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/libcalico-go/lib/set"
)
| 1 | // Copyright (c) 2017-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ipsets
import (
"strings"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/set"
)
type CallBackFunc func(ipSetId string)
// IPSets manages a whole plane of IP sets, i.e. all the IPv4 sets, or all the IPv6 IP sets.
type IPSets struct {
IPVersionConfig *IPVersionConfig
ipSetIDToIPSet map[string]*ipSet
logCxt *log.Entry
callbackOnUpdate CallBackFunc
}
func NewIPSets(ipVersionConfig *IPVersionConfig) *IPSets {
return &IPSets{
IPVersionConfig: ipVersionConfig,
ipSetIDToIPSet: map[string]*ipSet{},
logCxt: log.WithFields(log.Fields{
"family": ipVersionConfig.Family,
}),
}
}
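// SetCallback registers a function that is invoked with the affected set ID
// whenever an IP set is added, removed, or has its members changed.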
func (s *IPSets) SetCallback(callback CallBackFunc) {
s.callbackOnUpdate = callback
}
// AddOrReplaceIPSet is responsible for the creation (or replacement) of an IP set in the store
func (s *IPSets) AddOrReplaceIPSet(setMetadata IPSetMetadata, members []string) {
log.WithFields(log.Fields{
"metadata": setMetadata,
"numMembers": len(members),
}).Info("Adding IP set to cache")
s.logCxt.WithFields(log.Fields{
"setID": setMetadata.SetID,
"setType": setMetadata.Type,
}).Info("Creating IP set")
filteredMembers := s.filterMembers(members)
	// Create the IP set struct and store it, keyed by set ID
setID := setMetadata.SetID
ipSet := &ipSet{
IPSetMetadata: setMetadata,
Members: filteredMembers,
}
s.ipSetIDToIPSet[setID] = ipSet
s.callbackOnUpdate(setID)
}
// RemoveIPSet is responsible for the removal of an IP set from the store
func (s *IPSets) RemoveIPSet(setID string) {
s.logCxt.WithField("setID", setID).Info("Removing IP set")
delete(s.ipSetIDToIPSet, setID)
s.callbackOnUpdate(setID)
}
// AddMembers adds a range of new members to an existing IP set in the store
func (s *IPSets) AddMembers(setID string, newMembers []string) {
if len(newMembers) == 0 {
return
}
ipSet := s.ipSetIDToIPSet[setID]
filteredMembers := s.filterMembers(newMembers)
if filteredMembers.Len() == 0 {
return
}
s.logCxt.WithFields(log.Fields{
"setID": setID,
"filteredMembers": filteredMembers,
}).Debug("Adding new members to IP set")
filteredMembers.Iter(func(m interface{}) error {
ipSet.Members.Add(m)
return nil
})
s.callbackOnUpdate(setID)
}
// RemoveMembers removes a range of members from an existing IP set in the store
func (s *IPSets) RemoveMembers(setID string, removedMembers []string) {
if len(removedMembers) == 0 {
return
}
ipSet := s.ipSetIDToIPSet[setID]
filteredMembers := s.filterMembers(removedMembers)
if filteredMembers.Len() == 0 {
return
}
s.logCxt.WithFields(log.Fields{
"setID": setID,
"filteredMembers": filteredMembers,
}).Debug("Removing members from IP set")
filteredMembers.Iter(func(m interface{}) error {
ipSet.Members.Discard(m)
return nil
})
s.callbackOnUpdate(setID)
}
// GetIPSetMembers returns all of the members for a given IP set
func (s *IPSets) GetIPSetMembers(setID string) []string {
var retVal []string
ipSet := s.ipSetIDToIPSet[setID]
if ipSet == nil {
return nil
}
ipSet.Members.Iter(func(item interface{}) error {
member := item.(string)
retVal = append(retVal, member)
return nil
})
	// Note: it is important to return nil when the IP set has no members so
	// that policy rules referencing this IP set are not populated.
return retVal
}
// filterMembers filters out any members which are not of the correct
// ip family for the IPSet
func (s *IPSets) filterMembers(members []string) set.Set {
filtered := set.New()
wantIPV6 := s.IPVersionConfig.Family == IPFamilyV6
for _, member := range members {
isIPV6 := strings.Contains(member, ":")
if wantIPV6 != isIPV6 {
continue
}
filtered.Add(member)
}
return filtered
}
func (s *IPSets) GetIPFamily() IPFamily {
return s.IPVersionConfig.Family
}
// The following functions are no-ops on Windows.
func (s *IPSets) QueueResync() {
}
func (m *IPSets) GetTypeOf(setID string) (IPSetType, error) {
panic("Not implemented")
}
func (m *IPSets) GetMembers(setID string) (set.Set, error) {
// GetMembers is only called from XDPState, and XDPState does not coexist with
// config.BPFEnabled.
panic("Not implemented")
}
func (m *IPSets) ApplyUpdates() {
}
func (m *IPSets) ApplyDeletions() {
}
func (s *IPSets) SetFilter(ipSetNames set.Set) {
// Not needed for Windows.
}
| 1 | 19,772 | I would avoid importing `felix/ipsets` package because this package is the windows equivalent and should be at the same level of `felix/ipsets`. We could add linux specific dependencies into `felix/ipsets` later and it will break Windows build. | projectcalico-felix | go |
@@ -137,4 +137,13 @@ public interface Catalog {
* @throws NoSuchTableException if the table does not exist
*/
Table loadTable(TableIdentifier identifier);
+
+ /**
+ * Register a table.
+ *
+ * @param identifier a table identifier
+ * @param metadataFileLocation the location of a metadata file
+ * @return a Table instance
+ */
+ Table registerTable(TableIdentifier identifier, String metadataFileLocation);
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.catalog;
import java.util.Map;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.NoSuchTableException;
/**
* A Catalog API for table create, drop, and load operations.
*/
public interface Catalog {
/**
* Create a table.
*
* @param identifier a table identifier
* @param schema a schema
* @param spec a partition spec
* @param location a location for the table; leave null if unspecified
* @param properties a string map of table properties
* @return a Table instance
* @throws AlreadyExistsException if the table already exists
*/
Table createTable(
TableIdentifier identifier,
Schema schema,
PartitionSpec spec,
String location,
Map<String, String> properties);
/**
* Create a table.
*
* @param identifier a table identifier
* @param schema a schema
* @param spec a partition spec
* @param properties a string map of table properties
* @return a Table instance
* @throws AlreadyExistsException if the table already exists
*/
default Table createTable(
TableIdentifier identifier,
Schema schema,
PartitionSpec spec,
Map<String, String> properties) {
return createTable(identifier, schema, spec, null, properties);
}
/**
* Create a table.
*
* @param identifier a table identifier
* @param schema a schema
* @param spec a partition spec
* @return a Table instance
* @throws AlreadyExistsException if the table already exists
*/
default Table createTable(
TableIdentifier identifier,
Schema schema,
PartitionSpec spec) {
return createTable(identifier, schema, spec, null, null);
}
/**
* Create an unpartitioned table.
*
* @param identifier a table identifier
* @param schema a schema
* @return a Table instance
* @throws AlreadyExistsException if the table already exists
*/
default Table createTable(
TableIdentifier identifier,
Schema schema) {
return createTable(identifier, schema, PartitionSpec.unpartitioned(), null, null);
}
/**
* Check whether table exists.
*
* @param identifier a table identifier
* @return true if the table exists, false otherwise
*/
default boolean tableExists(TableIdentifier identifier) {
try {
loadTable(identifier);
return true;
} catch (NoSuchTableException e) {
return false;
}
}
/**
* Drop a table.
*
* @param identifier a table identifier
* @return true if the table was dropped, false if the table did not exist
*/
boolean dropTable(TableIdentifier identifier);
/**
* Rename a table.
*
* @param from identifier of the table to rename
* @param to new table name
* @throws NoSuchTableException if the table does not exist
*/
void renameTable(TableIdentifier from, TableIdentifier to);
/**
* Load a table.
*
* @param identifier a table identifier
* @return instance of {@link Table} implementation referred by {@code tableIdentifier}
* @throws NoSuchTableException if the table does not exist
*/
Table loadTable(TableIdentifier identifier);
}
| 1 | 14,266 | Maybe, `import` would be a better name. | apache-iceberg | java |
@@ -21,10 +21,11 @@ import java.util.List;
public class ProtoPagingParameters implements PagingParameters {
private static final String PARAMETER_PAGE_TOKEN = "page_token";
private static final String PARAMETER_NEXT_PAGE_TOKEN = "next_page_token";
- private static final String PARAMETER_MAX_RESULTS = "page_size";
+ private static final String PARAMETER_PAGE_SIZE = "page_size";
+ private static final String PARAMETER_MAX_RESULTS = "max_results";
private static final ImmutableList<String> IGNORED_PARAMETERS =
- ImmutableList.of(PARAMETER_PAGE_TOKEN, PARAMETER_MAX_RESULTS);
+ ImmutableList.of(PARAMETER_PAGE_TOKEN, PARAMETER_PAGE_SIZE);
@Override
public String getNameForPageToken() { | 1 | /* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.configgen;
import com.google.common.collect.ImmutableList;
import java.util.List;
/** Names of paging parameters used by protobuf-defined APIs. */
public class ProtoPagingParameters implements PagingParameters {
private static final String PARAMETER_PAGE_TOKEN = "page_token";
private static final String PARAMETER_NEXT_PAGE_TOKEN = "next_page_token";
private static final String PARAMETER_MAX_RESULTS = "page_size";
private static final ImmutableList<String> IGNORED_PARAMETERS =
ImmutableList.of(PARAMETER_PAGE_TOKEN, PARAMETER_MAX_RESULTS);
@Override
public String getNameForPageToken() {
return PARAMETER_PAGE_TOKEN;
}
@Override
public String getNameForPageSize() {
return PARAMETER_MAX_RESULTS;
}
@Override
public String getNameForNextPageToken() {
return PARAMETER_NEXT_PAGE_TOKEN;
}
@Override
public List<String> getIgnoredParameters() {
return IGNORED_PARAMETERS;
}
public static String nameForPageToken() {
return PARAMETER_PAGE_TOKEN;
}
public static String nameForPageSize() {
return PARAMETER_MAX_RESULTS;
}
public static String nameForNextPageToken() {
return PARAMETER_NEXT_PAGE_TOKEN;
}
}
| 1 | 30,952 | Wouldn't we want `PARAMETER_MAX_RESULTS` in here as well? (I might be miunderstanding how this is used) | googleapis-gapic-generator | java |
@@ -21,4 +21,8 @@ RSpec.configure do |config|
Capybara.default_host = "http://localhost:3000"
OmniAuth.config.test_mode = true
+
+ config.before(:suite) do
+ Role.ensure_system_roles_exist
+ end
end | 1 | ENV["RAILS_ENV"] ||= "test"
require "spec_helper"
require File.expand_path("../../config/environment", __FILE__)
require "rspec/rails"
require "shoulda/matchers"
Dir[Rails.root.join("spec/support/**/*.rb")].each { |f| require f }
RSpec.configure do |config|
config.fixture_path = "#{::Rails.root}/spec/support/fixtures"
config.use_transactional_fixtures = false
config.infer_spec_type_from_file_location!
config.include ControllerSpecHelper, type: :controller
config.include FeatureSpecHelper, type: :feature
config.include RequestSpecHelper, type: :request
[:feature, :request].each do |type|
config.include IntegrationSpecHelper, type: type
end
Capybara.default_host = "http://localhost:3000"
OmniAuth.config.test_mode = true
end
| 1 | 17,639 | We can re-use the config in our tests. | 18F-C2 | rb |
@@ -308,7 +308,7 @@ AC_MSG_RESULT([$LIBS])
# Dependencies that themselves have a pkg-config file available.
#
PC_REQUIRES=""
-AS_IF([test "$pmix_hwloc_support_will_build" = "yes"],
+AS_IF([test "$pmix_hwloc_source" != "cobuild"],
[PC_REQUIRES="$PC_REQUIRES hwloc"])
AS_IF([test $pmix_libevent_support -eq 1],
[PC_REQUIRES="$PC_REQUIRES libevent"]) | 1 | # -*- shell-script -*-
#
# Copyright (c) 2004-2009 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2010 The University of Tennessee and The University
# of Tennessee Research Foundation. All rights
# reserved.
# Copyright (c) 2004-2007 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2020 Cisco Systems, Inc. All rights reserved
# Copyright (c) 2006-2008 Sun Microsystems, Inc. All rights reserved.
# Copyright (c) 2006-2017 Los Alamos National Security, LLC. All rights
# reserved.
# Copyright (c) 2009 Oak Ridge National Labs. All rights reserved.
# Copyright (c) 2011-2013 NVIDIA Corporation. All rights reserved.
# Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013 Mellanox Technologies, Inc.
# All rights reserved.
# Copyright (c) 2014-2020 Intel, Inc. All rights reserved.
# Copyright (c) 2016 IBM Corporation. All rights reserved.
# Copyright (c) 2016-2018 Research Organization for Information Science
# and Technology (RIST). All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
############################################################################
# Initialization, version number, and other random setup/init stuff
############################################################################
# Load in everything found by autogen.pl
m4_include([config/autogen_found_items.m4])
# We don't have the version number to put in here yet, and we can't
# call PMIX_GET_VERSION (etc.) before AC_INIT. So use the shell
# version.
AC_INIT([pmix],
[m4_normalize(esyscmd([config/pmix_get_version.sh VERSION --tarball]))],
[https://github.com/pmix/pmix/issues], [pmix])
AC_PREREQ(2.69)
AC_CONFIG_AUX_DIR(./config)
# Note that this directory must *exactly* match what was specified via
# -I in ACLOCAL_AMFLAGS in the top-level Makefile.am.
AC_CONFIG_MACRO_DIR(./config)
# autotools expects to perform tests without interference
# from user-provided CFLAGS, particularly -Werror flags.
# Search for them here and cache any we find
PMIX_CFLAGS_cache=
PMIX_CFLAGS_pass=
for val in $CFLAGS; do
if echo "$val" | grep -q -e "-W"; then
PMIX_CFLAGS_cache="$PMIX_CFLAGS_cache $val";
else
PMIX_CFLAGS_pass="$PMIX_CFLAGS_pass $val";
fi
done
CFLAGS=$PMIX_CFLAGS_pass
PMIX_CAPTURE_CONFIGURE_CLI([PMIX_CONFIGURE_CLI])
# Get our platform support file. This has to be done very, very early
# because it twiddles random bits of autoconf
PMIX_LOAD_PLATFORM
PMIX_TOP_BUILDDIR="`pwd`"
AC_SUBST(PMIX_TOP_BUILDDIR)
top_builddir=`pwd`
cd "$srcdir"
PMIX_TOP_SRCDIR="`pwd`"
top_srcdir=$PMIX_TOP_SRCDIR
AC_SUBST(PMIX_TOP_SRCDIR)
cd "$PMIX_TOP_BUILDDIR"
AC_MSG_NOTICE([builddir: $PMIX_TOP_BUILDDIR])
AC_MSG_NOTICE([srcdir: $PMIX_TOP_SRCDIR])
if test "$PMIX_TOP_BUILDDIR" != "$PMIX_TOP_SRCDIR"; then
AC_MSG_NOTICE([Detected VPATH build])
fi
# setup configure options (e.g., show_title and friends)
PMIX_CONFIGURE_SETUP
pmix_show_title "Configuring PMIx"
# This must be before AM_INIT_AUTOMAKE
AC_CANONICAL_TARGET
# Init automake
AM_INIT_AUTOMAKE([foreign dist-bzip2 subdir-objects no-define 1.13.4])
# SILENT_RULES is new in AM 1.11, but we require 1.13.4 or higher via
# autogen. Limited testing shows that calling SILENT_RULES directly
# works in more cases than adding "silent-rules" to INIT_AUTOMAKE
# (even though they're supposed to be identical). Shrug.
AM_SILENT_RULES([yes])
# set the language
AC_LANG([C])
# AC_USE_SYSTEM_EXTENSIONS will modify CFLAGS if nothing was in there
# beforehand. We don't want that. So if there was nothing in
# CFLAGS, put nothing back in there.
PMIX_VAR_SCOPE_PUSH([CFLAGS_save])
CFLAGS_save=$CFLAGS
AC_USE_SYSTEM_EXTENSIONS
AS_IF([test -z "$CFLAGS_save"], [CFLAGS=])
PMIX_VAR_SCOPE_POP
# Sanity checks
AC_DEFUN([PMIX_CHECK_DIR_FOR_SPACES],[
dir="$1"
article="$2"
label="$3"
AC_MSG_CHECKING([directory of $label])
AC_MSG_RESULT([$dir])
AS_IF([test -n "`echo $dir | grep ' '`"],
[AC_MSG_WARN([This version of OpenPMIx does not support $article $label])
AC_MSG_WARN([with a path that contains spaces])
AC_MSG_ERROR([Cannot continue.])])
])
AC_DEFUN([PMIX_CANONICALIZE_PATH],[
case $host_os in
darwin*)
# MacOS does not have "readlink -f" or realpath (at least as
        # of MacOS Catalina / 10.15). Instead, use Python, because we
# know MacOS comes with a /usr/bin/python that has
# os.path.realpath.
$2=`/usr/bin/python -c 'import os; print os.path.realpath("'$1'")'`
;;
*)
$2=`readlink -f $1`
;;
esac
])
PMIX_VAR_SCOPE_PUSH(pmix_checkdir)
PMIX_CHECK_DIR_FOR_SPACES([$srcdir], [a], [source tree])
PMIX_CANONICALIZE_PATH([$srcdir], [pmix_checkdir])
PMIX_CHECK_DIR_FOR_SPACES([$pmix_checkdir], [an], [absolute source tree])
PMIX_CANONICALIZE_PATH([.], [pmix_checkdir])
PMIX_CHECK_DIR_FOR_SPACES([$pmix_checkdir], [a], [build tree])
PMIX_CHECK_DIR_FOR_SPACES([$prefix], [a], [prefix])
PMIX_CANONICALIZE_PATH([$prefix], [pmix_checkdir])
PMIX_CHECK_DIR_FOR_SPACES([$pmix_checkdir], [an], [absolute prefix])
PMIX_VAR_SCOPE_POP
####################################################################
# Setup the configure header files
####################################################################
AH_TOP([/* -*- c -*-
*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013-2015 Intel, Inc. All rights reserved
* Copyright (c) 2016 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
* This file is automatically generated by configure. Edits will be lost
* the next time you run configure!
*/
#ifndef PMIX_CONFIG_H
#define PMIX_CONFIG_H
#include "src/include/pmix_config_top.h"
])
AH_BOTTOM([
#include "src/include/pmix_config_bottom.h"
#endif /* PMIX_CONFIG_H */
])
############################################################################
# Setup Libtool
############################################################################
# We want new Libtool. None of that old stuff. Pfft.
m4_ifdef([LT_PREREQ], [],
[m4_fatal([libtool version 2.2.6 or higher is required], [63])])
LT_PREREQ([2.2.6])
#
# Enable static so that we have the --with tests done up here and can
# check for OS. Save the values of $enable_static and $enable_shared
# before setting the defaults, because if the user specified
# --[en|dis]able-[static|shared] on the command line, they'll already
# be set. In this way, we can tell if the user requested something or
# if the default was set here.
#
pmix_enable_shared="$enable_shared"
pmix_enable_static="$enable_static"
AS_IF([test ! -z "$enable_static" && test "$enable_static" = "yes"],
[CFLAGS="$CFLAGS -fPIC"])
AM_ENABLE_SHARED
AM_DISABLE_STATIC
PMIX_SETUP_WRAPPER_INIT
# This did not exist pre AM 1.11.x (where x is somewhere >0 and <3),
# but it is necessary in AM 1.12.x.
m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
AM_PROG_LEX
############################################################################
# Configuration options
############################################################################
# Set the MCA prefix
PMIX_SET_MCA_PREFIX([PMIX_MCA_])
PMIX_SET_MCA_CMD_LINE_ID([pmca])
# Define PMIx configure arguments
PMIX_DEFINE_ARGS
# Define some basic useful values
PMIX_BASIC_SETUP
# If debug mode, add -g
AS_IF([test "$pmix_debug" = "1"],
[CFLAGS="$CFLAGS -g"])
LT_INIT()
LT_LANG([C])
############################################################################
# Setup the core
############################################################################
# Setup the pmix core
PMIX_SETUP_CORE()
# Run the AM_CONDITIONALs
PMIX_DO_AM_CONDITIONALS
####################################################################
# Setup C compiler
####################################################################
AC_ARG_VAR(CC_FOR_BUILD,[build system C compiler])
AS_IF([test -z "$CC_FOR_BUILD"],[
AC_SUBST([CC_FOR_BUILD], [$CC])
])
# restore any user-provided Werror flags
AS_IF([test ! -z "$PMIX_CFLAGS_cache"], [CFLAGS="$CFLAGS $PMIX_CFLAGS_cache"])
# Delay setting pickyness until here so we
# don't break configure code tests
#if test "$WANT_PICKY_COMPILER" = "1"; then
# CFLAGS="$CFLAGS -Wall -Wextra -Werror"
#fi
# Cleanup duplicate flags
PMIX_FLAGS_UNIQ(CFLAGS)
PMIX_FLAGS_UNIQ(CPPFLAGS)
PMIX_FLAGS_UNIQ(LDFLAGS)
PMIX_FLAGS_UNIQ(LIBS)
#
# Delayed the substitution of CFLAGS and CXXFLAGS until now because
# they may have been modified throughout the course of this script.
#
AC_SUBST(CFLAGS)
AC_SUBST(CPPFLAGS)
pmix_show_title "Final compiler flags"
AC_MSG_CHECKING([final CPPFLAGS])
AC_MSG_RESULT([$CPPFLAGS])
AC_MSG_CHECKING([final CFLAGS])
AC_MSG_RESULT([$CFLAGS])
AC_MSG_CHECKING([final LDFLAGS])
AC_MSG_RESULT([$LDFLAGS])
AC_MSG_CHECKING([final LIBS])
AC_MSG_RESULT([$LIBS])
####################################################################
# Setup variables for pkg-config file (maint/pmix.pc.in)
####################################################################
#
# Dependencies that themselves have a pkg-config file available.
#
PC_REQUIRES=""
AS_IF([test "$pmix_hwloc_support_will_build" = "yes"],
[PC_REQUIRES="$PC_REQUIRES hwloc"])
AS_IF([test $pmix_libevent_support -eq 1],
[PC_REQUIRES="$PC_REQUIRES libevent"])
AS_IF([test "$pmix_zlib_support" = "1"],
[PC_REQUIRES="$PC_REQUIRES zlib"])
AC_SUBST([PC_REQUIRES], ["$PC_REQUIRES"])
#
# Dependencies that don't have a pkg-config file available.
# In this case we need to manually add -L<path> and -l<lib>
# to the PC_PRIVATE_LIBS variable.
#
PC_PRIVATE_LIBS=""
AS_IF([test $pmix_libev_support -eq 1],
[PC_PRIVATE_LIBS="$PC_PRIVATE_LIBS $pmix_libev_LDFLAGS $pmix_libev_LIBS"])
AC_SUBST([PC_PRIVATE_LIBS], ["$PC_PRIVATE_LIBS"])
####################################################################
# -Werror for CI scripts
####################################################################
AC_ARG_ENABLE(werror,
AC_HELP_STRING([--enable-werror],
[Treat compiler warnings as errors]),
[
CFLAGS="$CFLAGS -Werror"
])
####################################################################
# Version information
####################################################################
# PMIX_VERSION was setup by PMIX_SETUP_CORE above.
# Make configure depend on the VERSION file, since it's used in AC_INIT
AC_SUBST([CONFIGURE_DEPENDENCIES], ['$(top_srcdir)/VERSION'])
. $srcdir/VERSION
AC_SUBST([libpmix_so_version])
AC_SUBST([libpmi_so_version])
AC_SUBST([libpmi2_so_version])
AC_SUBST([libmca_common_dstore_so_version])
AC_SUBST([libmca_common_sse_so_version])
AC_CONFIG_FILES(pmix_config_prefix[contrib/Makefile]
pmix_config_prefix[examples/Makefile]
pmix_config_prefix[test/Makefile]
pmix_config_prefix[test/python/Makefile]
pmix_config_prefix[test/simple/Makefile]
pmix_config_prefix[test/sshot/Makefile]
pmix_config_prefix[maint/pmix.pc])
pmix_show_title "Configuration complete"
AC_OUTPUT
PMIX_SUMMARY_PRINT
| 1 | 9,683 | Wouldn't we have the same issue with the internal libevent? IIRC, the integration is pretty close to an exact duplicate. Or does the libevent m4 code already take that into account when setting this variable? PMIx _requires_ libevent, so it isn't like pmix_libevent_support can be anything other than 1. I'm okay with leaving this question for separate resolution - just wondering why we don't have the same issue here. | openpmix-openpmix | c |
@@ -51,6 +51,9 @@ func (h *tlsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
strings.Contains(ua, "Trident") {
checked = true
mitm = !info.looksLikeEdge()
+ } else if strings.Contains(ua, "CRiOS") {
+ checked = true
+ mitm = !info.looksLikeChromeOniOS()
} else if strings.Contains(ua, "Chrome") {
checked = true
mitm = !info.looksLikeChrome() | 1 | package httpserver
import (
"bytes"
"context"
"crypto/tls"
"io"
"net"
"net/http"
"strings"
"sync"
"time"
)
// tlsHandler is a http.Handler that will inject a value
// into the request context indicating if the TLS
// connection is likely being intercepted.
type tlsHandler struct {
next http.Handler
listener *tlsHelloListener
closeOnMITM bool // whether to close connection on MITM; TODO: expose through new directive
}
// ServeHTTP checks the User-Agent. For the four main browsers (Chrome,
// Edge, Firefox, and Safari) indicated by the User-Agent, the properties
// of the TLS Client Hello will be compared. The context value "mitm" will
// be set to a value indicating if it is likely that the underlying TLS
// connection is being intercepted.
//
// Note that due to Microsoft's decision to intentionally make IE/Edge
// user agents obscure (and look like other browsers), this may offer
// less accuracy for IE/Edge clients.
//
// This MITM detection capability is based on research done by Durumeric,
// Halderman, et. al. in "The Security Impact of HTTPS Interception" (NDSS '17):
// https://jhalderm.com/pub/papers/interception-ndss17.pdf
func (h *tlsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.listener.helloInfosMu.RLock()
info := h.listener.helloInfos[r.RemoteAddr]
h.listener.helloInfosMu.RUnlock()
ua := r.Header.Get("User-Agent")
var checked, mitm bool
if r.Header.Get("X-BlueCoat-Via") != "" || // Blue Coat (masks User-Agent header to generic values)
r.Header.Get("X-FCCKV2") != "" || // Fortinet
info.advertisesHeartbeatSupport() { // no major browsers have ever implemented Heartbeat
checked = true
mitm = true
} else if strings.Contains(ua, "Edge") || strings.Contains(ua, "MSIE") ||
strings.Contains(ua, "Trident") {
checked = true
mitm = !info.looksLikeEdge()
} else if strings.Contains(ua, "Chrome") {
checked = true
mitm = !info.looksLikeChrome()
} else if strings.Contains(ua, "Firefox") {
checked = true
mitm = !info.looksLikeFirefox()
} else if strings.Contains(ua, "Safari") {
checked = true
mitm = !info.looksLikeSafari()
}
if checked {
r = r.WithContext(context.WithValue(r.Context(), CtxKey("mitm"), mitm))
}
if mitm && h.closeOnMITM {
// TODO: This termination might need to happen later in the middleware
// chain in order to be picked up by the log directive, in case the site
// owner still wants to log this event. It'll probably require a new
// directive. If this feature is useful, we can finish implementing this.
r.Close = true
return
}
h.next.ServeHTTP(w, r)
}
type clientHelloConn struct {
net.Conn
readHello bool
listener *tlsHelloListener
}
func (c *clientHelloConn) Read(b []byte) (n int, err error) {
if !c.readHello {
		// Read the 5-byte TLS record header (content type, version, length).
hdr := make([]byte, 5)
n, err := io.ReadFull(c.Conn, hdr)
if err != nil {
return n, err
}
// Get the length of the ClientHello message and read it as well.
length := uint16(hdr[3])<<8 | uint16(hdr[4])
hello := make([]byte, int(length))
n, err = io.ReadFull(c.Conn, hello)
if err != nil {
return n, err
}
// Parse the ClientHello and store it in the map.
rawParsed := parseRawClientHello(hello)
c.listener.helloInfosMu.Lock()
c.listener.helloInfos[c.Conn.RemoteAddr().String()] = rawParsed
c.listener.helloInfosMu.Unlock()
// Since we buffered the header and ClientHello, pretend we were
// never here by lining up the buffered values to be read with a
// custom connection type, followed by the rest of the actual
// underlying connection.
mr := io.MultiReader(bytes.NewReader(hdr), bytes.NewReader(hello), c.Conn)
mc := multiConn{Conn: c.Conn, reader: mr}
c.Conn = mc
c.readHello = true
}
return c.Conn.Read(b)
}
// multiConn is a net.Conn that reads from the
// given reader instead of the wire directly. This
// is useful when some of the connection has already
// been read (like the TLS Client Hello) and the
// reader is a io.MultiReader that starts with
// the contents of the buffer.
type multiConn struct {
net.Conn
reader io.Reader
}
// Read reads from mc.reader.
func (mc multiConn) Read(b []byte) (n int, err error) {
return mc.reader.Read(b)
}
// parseRawClientHello parses data which contains the raw
// TLS Client Hello message. It extracts relevant information
// into info. Any error reading the Client Hello (such as
// insufficient length or invalid length values) results in
// a silent error and an incomplete info struct, since there
// is no good way to handle an error like this during Accept().
// The data is expected to contain the whole ClientHello and
// ONLY the ClientHello.
//
// The majority of this code is borrowed from the Go standard
// library, which is (c) The Go Authors. It has been modified
// to fit this use case.
func parseRawClientHello(data []byte) (info rawHelloInfo) {
if len(data) < 42 {
return
}
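	// The 38 bytes before the session ID length are the 4-byte handshake
	// header, the 2-byte client version and the 32-byte random.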
sessionIdLen := int(data[38])
if sessionIdLen > 32 || len(data) < 39+sessionIdLen {
return
}
data = data[39+sessionIdLen:]
if len(data) < 2 {
return
}
// cipherSuiteLen is the number of bytes of cipher suite numbers. Since
// they are uint16s, the number must be even.
cipherSuiteLen := int(data[0])<<8 | int(data[1])
if cipherSuiteLen%2 == 1 || len(data) < 2+cipherSuiteLen {
return
}
numCipherSuites := cipherSuiteLen / 2
// read in the cipher suites
info.cipherSuites = make([]uint16, numCipherSuites)
for i := 0; i < numCipherSuites; i++ {
info.cipherSuites[i] = uint16(data[2+2*i])<<8 | uint16(data[3+2*i])
}
data = data[2+cipherSuiteLen:]
if len(data) < 1 {
return
}
// read in the compression methods
compressionMethodsLen := int(data[0])
if len(data) < 1+compressionMethodsLen {
return
}
info.compressionMethods = data[1 : 1+compressionMethodsLen]
data = data[1+compressionMethodsLen:]
// ClientHello is optionally followed by extension data
if len(data) < 2 {
return
}
extensionsLength := int(data[0])<<8 | int(data[1])
data = data[2:]
if extensionsLength != len(data) {
return
}
// read in each extension, and extract any relevant information
// from extensions we care about
for len(data) != 0 {
if len(data) < 4 {
return
}
extension := uint16(data[0])<<8 | uint16(data[1])
length := int(data[2])<<8 | int(data[3])
data = data[4:]
if len(data) < length {
return
}
// record that the client advertised support for this extension
info.extensions = append(info.extensions, extension)
switch extension {
case extensionSupportedCurves:
// http://tools.ietf.org/html/rfc4492#section-5.5.1
if length < 2 {
return
}
l := int(data[0])<<8 | int(data[1])
if l%2 == 1 || length != l+2 {
return
}
numCurves := l / 2
info.curves = make([]tls.CurveID, numCurves)
d := data[2:]
for i := 0; i < numCurves; i++ {
info.curves[i] = tls.CurveID(d[0])<<8 | tls.CurveID(d[1])
d = d[2:]
}
case extensionSupportedPoints:
// http://tools.ietf.org/html/rfc4492#section-5.5.2
if length < 1 {
return
}
l := int(data[0])
if length != l+1 {
return
}
info.points = make([]uint8, l)
copy(info.points, data[1:])
}
data = data[length:]
}
return
}
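// Illustrative usage sketch (not part of the original file): helloBytes is a
// hypothetical buffer holding the raw ClientHello handshake message described
// above. The parsed info can then be run through the fingerprint checks
// defined further down in this file.
//
//	info := parseRawClientHello(helloBytes)
//	if info.advertisesHeartbeatSupport() {
//		// the client advertised the Heartbeat extension
//	}
//	matchesChrome := info.looksLikeChrome()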
// newTLSListener returns a new tlsHelloListener that wraps ln.
func newTLSListener(ln net.Listener, config *tls.Config, readTimeout time.Duration) *tlsHelloListener {
return &tlsHelloListener{
Listener: ln,
config: config,
readTimeout: readTimeout,
helloInfos: make(map[string]rawHelloInfo),
}
}
// tlsHelloListener is a TLS listener that is specially designed
// to read the ClientHello manually so we can extract necessary
// information from it. Each ClientHello message is mapped by
// the remote address of the client, which must be removed when
// the connection is closed (use ConnState).
type tlsHelloListener struct {
net.Listener
config *tls.Config
readTimeout time.Duration
helloInfos map[string]rawHelloInfo
helloInfosMu sync.RWMutex
}
// Accept waits for and returns the next connection to the listener.
// After it accepts the underlying connection, it reads the
// ClientHello message and stores the parsed data into a map on l.
func (l *tlsHelloListener) Accept() (net.Conn, error) {
conn, err := l.Listener.Accept()
if err != nil {
return nil, err
}
helloConn := &clientHelloConn{Conn: conn, listener: l}
return tls.Server(helloConn, l.config), nil
}
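// Illustrative wiring sketch (hypothetical names, not part of the original
// file): the listener is intended to wrap a plain TCP listener so that every
// accepted connection records its parsed ClientHello in helloInfos, keyed by
// the client's remote address, before the TLS and HTTP layers see it. The
// snippet assumes the net/http package and an existing tlsConfig and handler.
//
//	ln, _ := net.Listen("tcp", ":443")
//	helloLn := newTLSListener(ln, tlsConfig, 5*time.Second)
//	_ = http.Serve(helloLn, handler)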
// rawHelloInfo contains the "raw" data parsed from the TLS
// Client Hello. No interpretation is done on the raw data.
//
// The methods on this type implement heuristics described
// by Durumeric, Halderman, et al. in
// "The Security Impact of HTTPS Interception":
// https://jhalderm.com/pub/papers/interception-ndss17.pdf
type rawHelloInfo struct {
cipherSuites []uint16
extensions []uint16
compressionMethods []byte
curves []tls.CurveID
points []uint8
}
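// Illustrative sketch (hypothetical helper, not part of the original file):
// one way the heuristics below could be combined with the advertised
// User-Agent to flag likely interception, in the spirit of the paper cited
// above. Assumes the strings package; the case order matters because Edge
// User-Agent strings also contain "Chrome" and "Safari".
//
//	func likelyIntercepted(userAgent string, info rawHelloInfo) bool {
//		switch {
//		case strings.Contains(userAgent, "Edge"):
//			return !info.looksLikeEdge()
//		case strings.Contains(userAgent, "Firefox"):
//			return !info.looksLikeFirefox()
//		case strings.Contains(userAgent, "Chrome"):
//			return !info.looksLikeChrome()
//		case strings.Contains(userAgent, "Safari"):
//			return !info.looksLikeSafari()
//		}
//		return false
//	}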
// advertisesHeartbeatSupport returns true if info indicates
// that the client supports the Heartbeat extension.
func (info rawHelloInfo) advertisesHeartbeatSupport() bool {
for _, ext := range info.extensions {
if ext == extensionHeartbeat {
return true
}
}
return false
}
// looksLikeFirefox returns true if info looks like a handshake
// from a modern version of Firefox.
func (info rawHelloInfo) looksLikeFirefox() bool {
// "To determine whether a Firefox session has been
// intercepted, we check for the presence and order
// of extensions, cipher suites, elliptic curves,
// EC point formats, and handshake compression methods."
// We check for the presence and order of the extensions.
// Note: Sometimes padding (21) is present, sometimes not.
// Note: Firefox 51+ does not advertise 0x3374 (13172, NPN).
// Note: Firefox doesn't advertise 0x0 (0, SNI) when connecting to IP addresses.
requiredExtensionsOrder := []uint16{23, 65281, 10, 11, 35, 16, 5, 65283, 13}
if !assertPresenceAndOrdering(requiredExtensionsOrder, info.extensions, true) {
return false
}
// We check for both presence of curves and their ordering.
expectedCurves := []tls.CurveID{29, 23, 24, 25}
if len(info.curves) != len(expectedCurves) {
return false
}
for i := range expectedCurves {
if info.curves[i] != expectedCurves[i] {
return false
}
}
// We check for order of cipher suites but not presence, since
	// according to the paper, cipher suites may not be added
// or reordered by the user, but they may be disabled.
expectedCipherSuiteOrder := []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // 0xc02b
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // 0xc02f
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, // 0xcca9
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, // 0xcca8
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, // 0xc02c
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // 0xc030
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // 0xc00a
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // 0xc009
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // 0xc013
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // 0xc014
TLS_DHE_RSA_WITH_AES_128_CBC_SHA, // 0x33
TLS_DHE_RSA_WITH_AES_256_CBC_SHA, // 0x39
tls.TLS_RSA_WITH_AES_128_CBC_SHA, // 0x2f
tls.TLS_RSA_WITH_AES_256_CBC_SHA, // 0x35
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // 0xa
}
return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.cipherSuites, false)
}
// looksLikeChrome returns true if info looks like a handshake
// from a modern version of Chrome.
func (info rawHelloInfo) looksLikeChrome() bool {
// "We check for ciphers and extensions that Chrome is known
// to not support, but do not check for the inclusion of
// specific ciphers or extensions, nor do we validate their
// order. When appropriate, we check the presence and order
// of elliptic curves, compression methods, and EC point formats."
// Not in Chrome 56, but present in Safari 10 (Feb. 2017):
// TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 (0xc024)
// TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 (0xc023)
// TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA (0xc00a)
// TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA (0xc009)
// TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 (0xc028)
// TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 (0xc027)
// TLS_RSA_WITH_AES_256_CBC_SHA256 (0x3d)
// TLS_RSA_WITH_AES_128_CBC_SHA256 (0x3c)
// Not in Chrome 56, but present in Firefox 51 (Feb. 2017):
// TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA (0xc00a)
// TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA (0xc009)
// TLS_DHE_RSA_WITH_AES_128_CBC_SHA (0x33)
// TLS_DHE_RSA_WITH_AES_256_CBC_SHA (0x39)
// Selected ciphers present in Chrome mobile (Feb. 2017):
// 0xc00a, 0xc014, 0xc009, 0x9c, 0x9d, 0x2f, 0x35, 0xa
chromeCipherExclusions := map[uint16]struct{}{
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384: {}, // 0xc024
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: {}, // 0xc023
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384: {}, // 0xc028
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: {}, // 0xc027
TLS_RSA_WITH_AES_256_CBC_SHA256: {}, // 0x3d
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: {}, // 0x3c
TLS_DHE_RSA_WITH_AES_128_CBC_SHA: {}, // 0x33
TLS_DHE_RSA_WITH_AES_256_CBC_SHA: {}, // 0x39
}
for _, ext := range info.cipherSuites {
if _, ok := chromeCipherExclusions[ext]; ok {
return false
}
}
// Chrome does not include curve 25 (CurveP521) (as of Chrome 56, Feb. 2017).
for _, curve := range info.curves {
if curve == 25 {
return false
}
}
return true
}
// looksLikeEdge returns true if info looks like a handshake
// from a modern version of MS Edge.
func (info rawHelloInfo) looksLikeEdge() bool {
// "SChannel connections can by uniquely identified because SChannel
// is the only TLS library we tested that includes the OCSP status
// request extension before the supported groups and EC point formats
// extensions."
//
// More specifically, the OCSP status request extension appears
// *directly* before the other two extensions, which occur in that
// order. (I contacted the authors for clarification and verified it.)
for i, ext := range info.extensions {
if ext == extensionOCSPStatusRequest {
if len(info.extensions) <= i+2 {
return false
}
if info.extensions[i+1] != extensionSupportedCurves ||
info.extensions[i+2] != extensionSupportedPoints {
return false
}
}
}
for _, cs := range info.cipherSuites {
// As of Feb. 2017, Edge does not have 0xff, but Avast adds it
if cs == scsvRenegotiation {
return false
}
// Edge and modern IE do not have 0x4 or 0x5, but Blue Coat does
if cs == TLS_RSA_WITH_RC4_128_MD5 || cs == tls.TLS_RSA_WITH_RC4_128_SHA {
return false
}
}
return true
}
// looksLikeSafari returns true if info looks like a handshake
// from a modern version of Safari.
func (info rawHelloInfo) looksLikeSafari() bool {
// "One unique aspect of Secure Transport is that it includes
// the TLS_EMPTY_RENEGOTIATION_INFO_SCSV (0xff) cipher first,
// whereas the other libraries we investigated include the
// cipher last. Similar to Microsoft, Apple has changed
// TLS behavior in minor OS updates, which are not indicated
// in the HTTP User-Agent header. We allow for any of the
// updates when validating handshakes, and we check for the
// presence and ordering of ciphers, extensions, elliptic
// curves, and compression methods."
// Note that any C lib (e.g. curl) compiled on macOS
// will probably use Secure Transport which will also
// share the TLS handshake characteristics of Safari.
// Let's do the easy check first... should be sufficient in many cases.
if len(info.cipherSuites) < 1 {
return false
}
if info.cipherSuites[0] != scsvRenegotiation {
return false
}
// We check for the presence and order of the extensions.
requiredExtensionsOrder := []uint16{10, 11, 13, 13172, 16, 5, 18, 23}
if !assertPresenceAndOrdering(requiredExtensionsOrder, info.extensions, true) {
return false
}
// We check for order and presence of cipher suites
expectedCipherSuiteOrder := []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, // 0xc02c
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // 0xc02b
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, // 0xc024
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // 0xc023
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // 0xc00a
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // 0xc009
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // 0xc030
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // 0xc02f
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, // 0xc028
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // 0xc027
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // 0xc014
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // 0xc013
tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // 0x9d
tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // 0x9c
TLS_RSA_WITH_AES_256_CBC_SHA256, // 0x3d
TLS_RSA_WITH_AES_128_CBC_SHA256, // 0x3c
tls.TLS_RSA_WITH_AES_256_CBC_SHA, // 0x35
tls.TLS_RSA_WITH_AES_128_CBC_SHA, // 0x2f
}
return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.cipherSuites, true)
}
// assertPresenceAndOrdering will return true if candidateList contains
// the items in requiredItems in the same order as requiredItems.
//
// If requiredIsSubset is true, then all items in requiredItems must be
// present in candidateList. If requiredIsSubset is false, then requiredItems
// may contain items that are not in candidateList.
//
// In all cases, the order of requiredItems is enforced.
func assertPresenceAndOrdering(requiredItems, candidateList []uint16, requiredIsSubset bool) bool {
superset := requiredItems
subset := candidateList
if requiredIsSubset {
superset = candidateList
subset = requiredItems
}
var j int
for _, item := range subset {
var found bool
for j < len(superset) {
if superset[j] == item {
found = true
break
}
j++
}
if j == len(superset) && !found {
return false
}
}
return true
}
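// Examples (illustrative, not part of the original file): the required items
// must appear in candidateList in the given relative order, while other
// values may be interleaved between them.
//
//	assertPresenceAndOrdering([]uint16{1, 3}, []uint16{1, 2, 3}, true)  // true
//	assertPresenceAndOrdering([]uint16{3, 1}, []uint16{1, 2, 3}, true)  // false: order violated
//	assertPresenceAndOrdering([]uint16{1, 9, 3}, []uint16{1, 3}, false) // true: 9 may be absent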
const (
extensionOCSPStatusRequest = 5
extensionSupportedCurves = 10 // also called "SupportedGroups"
extensionSupportedPoints = 11
extensionHeartbeat = 15
scsvRenegotiation = 0xff
// cipher suites missing from the crypto/tls package,
// in no particular order here
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xc024
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xc023
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xc028
TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x3c
TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x3d
TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x33
TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x39
TLS_RSA_WITH_RC4_128_MD5 = 0x4
)
| 1 | 10,147 | The actual User-Agent string uses "CriOS" not "CRiOS" | caddyserver-caddy | go |
@@ -19,10 +19,8 @@ namespace Nethermind.Core
{
public interface IKeyValueStoreWithBatching : IKeyValueStore
{
- byte[]? this[byte[] key] { get; set; }
-
void StartBatch();
-
+
void CommitBatch();
}
-}
+} | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
//
namespace Nethermind.Core
{
public interface IKeyValueStoreWithBatching : IKeyValueStore
{
byte[]? this[byte[] key] { get; set; }
void StartBatch();
void CommitBatch();
}
} | 1 | 24,816 | Unused code deletion. Is that a desired thing? No implementation of this actually used this indexer, so removing it seemed reasonable (and was suggested by the compiler). Maybe this had an indexer so plugins could use it? This change is definitely pushing the limits of what is appropriate in a "cleanup" as it is changing public API surface area. | NethermindEth-nethermind | .cs |
@@ -489,6 +489,14 @@ module Beaker
end
rescue Exception => teardown_exception
+ begin
+ if !host.is_pe?
+ dump_puppet_log(host)
+ end
+ rescue Exception => dumping_exception
+ logger.error("Raised during attempt to dump puppet logs: #{dumping_exception}")
+ end
+
if original_exception
logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n")
raise original_exception | 1 | require 'resolv'
require 'inifile'
require 'timeout'
require 'beaker/dsl/outcomes'
module Beaker
module DSL
# This is the heart of the Puppet Acceptance DSL. Here you find a helper
# to proxy commands to hosts, more commands to move files between hosts
# and execute remote scripts, confine test cases to certain hosts and
# prepare the state of a test case.
#
# To mix this is into a class you need the following:
# * a method *hosts* that yields any hosts implementing
# {Beaker::Host}'s interface to act upon.
# * a method *logger* that yields a logger implementing
# {Beaker::Logger}'s interface.
# * the module {Beaker::DSL::Roles} that provides access to the various hosts implementing
# {Beaker::Host}'s interface to act upon
# * the module {Beaker::DSL::Wrappers} the provides convenience methods for {Beaker::DSL::Command} creation
#
#
# @api dsl
module Helpers
# @!macro common_opts
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :silent (false) Do not produce log output
# @option opts [Array<Fixnum>] :acceptable_exit_codes ([0]) An array
# (or range) of integer exit codes that should be considered
# acceptable. An error will be thrown if the exit code does not
# match one of the values in this list.
# @option opts [Hash{String=>String}] :environment ({}) These will be
# treated as extra environment variables that should be set before
# running the command.
#
# The primary method for executing commands *on* some set of hosts.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [String, Command] command The command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
# @example Most basic usage
# on hosts, 'ls /tmp'
#
# @example Allowing additional exit codes to pass
# on agents, 'puppet agent -t', :acceptable_exit_codes => [0,2]
#
# @example Using the returned result for any kind of checking
# if on(host, 'ls -la ~').stdout =~ /\.bin/
# ...do some action...
# end
#
# @example Using TestCase helpers from within a test.
# agents.each do |agent|
# on agent, 'cat /etc/puppet/puppet.conf' do
# assert_match stdout, /server = #{master}/, 'WTF Mate'
# end
# end
#
# @example Using a role (defined in a String) to identify the host
# on "master", "echo hello"
#
# @example Using a role (defined in a Symbol) to identify the host
# on :dashboard, "echo hello"
#
# @return [Result] An object representing the outcome of *command*.
# @raise [FailTest] Raises an exception if *command* obviously fails.
def on(host, command, opts = {}, &block)
unless command.is_a? Command
cmd_opts = {}
if opts[:environment]
cmd_opts['ENV'] = opts[:environment]
end
command = Command.new(command.to_s, [], cmd_opts)
end
if host.is_a? String or host.is_a? Symbol
host = hosts_as(host) #check by role
end
if host.is_a? Array
host.map { |h| on h, command, opts, &block }
else
@result = host.exec(command, opts)
# Also, let additional checking be performed by the caller.
yield self if block_given?
return @result
end
end
# The method for executing commands on the default host
#
# @param [String, Command] command The command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
# @example Most basic usage
# shell 'ls /tmp'
#
# @example Allowing additional exit codes to pass
# shell 'puppet agent -t', :acceptable_exit_codes => [0,2]
#
# @example Using the returned result for any kind of checking
# if shell('ls -la ~').stdout =~ /\.bin/
# ...do some action...
# end
#
# @example Using TestCase helpers from within a test.
# agents.each do |agent|
# shell('cat /etc/puppet/puppet.conf') do |result|
# assert_match result.stdout, /server = #{master}/, 'WTF Mate'
# end
# end
#
# @return [Result] An object representing the outcome of *command*.
# @raise [FailTest] Raises an exception if *command* obviously fails.
def shell(command, opts = {}, &block)
on(default, command, opts, &block)
end
# @deprecated
      # A proxy for the last {Beaker::Result#stdout} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def stdout
return nil if @result.nil?
@result.stdout
end
# @deprecated
      # A proxy for the last {Beaker::Result#stderr} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def stderr
return nil if @result.nil?
@result.stderr
end
# @deprecated
      # A proxy for the last {Beaker::Result#exit_code} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def exit_code
return nil if @result.nil?
@result.exit_code
end
# Move a file from a remote to a local path
# @note If using {Beaker::Host} for the hosts *scp* is not
# required on the system as it uses Ruby's net/scp library. The
# net-scp gem however is required (and specified in the gemspec).
#
# @param [Host, #do_scp_from] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] from_path A remote path to a file.
# @param [String] to_path A local path to copy *from_path* to.
# @!macro common_opts
#
# @return [Result] Returns the result of the SCP operation
def scp_from host, from_path, to_path, opts = {}
if host.is_a? Array
host.each { |h| scp_from h, from_path, to_path, opts }
else
@result = host.do_scp_from(from_path, to_path, opts)
@result.log logger
end
end
# Move a local file to a remote host
# @note If using {Beaker::Host} for the hosts *scp* is not
# required on the system as it uses Ruby's net/scp library. The
      #       net-scp gem however is required (and specified in the gemspec).
#
# @param [Host, #do_scp_to] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_to}.
# @param [String] from_path A local path to a file.
# @param [String] to_path A remote path to copy *from_path* to.
# @!macro common_opts
#
# @return [Result] Returns the result of the SCP operation
def scp_to host, from_path, to_path, opts = {}
if host.is_a? Array
host.each { |h| scp_to h, from_path, to_path, opts }
else
@result = host.do_scp_to(from_path, to_path, opts)
@result.log logger
end
end
# Check to see if a package is installed on a remote host
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to check for.
#
# @return [Boolean] true/false if the package is found
def check_for_package host, package_name
host.check_for_package package_name
end
# Install a package on a host
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to install
#
# @return [Result] An object representing the outcome of *install command*.
def install_package host, package_name
host.install_package package_name
end
# Upgrade a package on a host. The package must already be installed
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to install
#
# @return [Result] An object representing the outcome of *upgrade command*.
def upgrade_package host, package_name
host.upgrade_package package_name
end
# Deploy packaging configurations generated by
# https://github.com/puppetlabs/packaging to a host.
#
# @note To ensure the repo configs are available for deployment,
# you should run `rake pl:jenkins:deb_repo_configs` and
# `rake pl:jenkins:rpm_repo_configs` on your project checkout
#
# @param [Host] host
# @param [String] path The path to the generated repository config
# files. ex: /myproject/pkg/repo_configs
# @param [String] name A human-readable name for the repository
      # @param [String] version The version of the project, as used by the
# packaging tools. This can be determined with
# `rake pl:print_build_params` from the packaging
# repo.
def deploy_package_repo host, path, name, version
host.deploy_package_repo path, name, version
end
# Create a remote file out of a string
# @note This method uses Tempfile in Ruby's STDLIB as well as {#scp_to}.
#
# @param [Host, #do_scp_to] hosts One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] file_path A remote path to place *file_content* at.
# @param [String] file_content The contents of the file to be placed.
# @!macro common_opts
#
# @return [Result] Returns the result of the underlying SCP operation.
def create_remote_file(hosts, file_path, file_content, opts = {})
Tempfile.open 'beaker' do |tempfile|
File.open(tempfile.path, 'w') {|file| file.puts file_content }
scp_to hosts, tempfile.path, file_path, opts
end
end
# Move a local script to a remote host and execute it
# @note this relies on {#on} and {#scp_to}
#
# @param [Host, #do_scp_to] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] script A local path to find an executable script at.
# @!macro common_opts
# @param [Proc] block Additional tests to run after script has executed
#
# @return [Result] Returns the result of the underlying SCP operation.
def run_script_on(host, script, opts = {}, &block)
      # this is unsafe as it uses File::SEPARATOR, which will be set to that
# of the coordinator node. This works for us because we use cygwin
# which will properly convert the paths. Otherwise this would not
# work for running tests on a windows machine when the coordinator
# that the harness is running on is *nix. We should use
# {Beaker::Host#temp_path} instead. TODO
remote_path = File.join("", "tmp", File.basename(script))
scp_to host, script, remote_path
on host, remote_path, opts, &block
end
# Move a local script to default host and execute it
# @see #run_script_on
def run_script(script, opts = {}, &block)
run_script_on(default, script, opts, &block)
end
# Limit the hosts a test case is run against
# @note This will modify the {Beaker::TestCase#hosts} member
# in place unless an array of hosts is passed into it and
# {Beaker::TestCase#logger} yielding an object that responds
# like {Beaker::Logger#warn}, as well as
# {Beaker::DSL::Outcomes#skip_test}, and optionally
# {Beaker::TestCase#hosts}.
#
# @param [Symbol] type The type of confinement to do. Valid parameters
# are *:to* to confine the hosts to only those that
# match *criteria* or *:except* to confine the test
# case to only those hosts that do not match
# criteria.
# @param [Hash{Symbol,String=>String,Regexp,Array<String,Regexp>}]
# criteria Specify the criteria with which a host should be
# considered for inclusion or exclusion. The key is any attribute
# of the host that will be yielded by {Beaker::Host#[]}.
# The value can be any string/regex or array of strings/regexp.
# The values are compared using [Enumerable#any?] so that if one
# value of an array matches the host is considered a match for that
# criteria.
# @param [Array<Host>] host_array This creatively named parameter is
# an optional array of hosts to confine to. If not passed in, this
# method will modify {Beaker::TestCase#hosts} in place.
# @param [Proc] block Addition checks to determine suitability of hosts
# for confinement. Each host that is still valid after checking
# *criteria* is then passed in turn into this block. The block
# should return true if the host matches this additional criteria.
#
# @example Basic usage to confine to debian OSes.
# confine :to, :platform => 'debian'
#
# @example Confining to anything but Windows and Solaris
# confine :except, :platform => ['windows', 'solaris']
#
# @example Using additional block to confine to Solaris global zone.
# confine :to, :platform => 'solaris' do |solaris|
# on( solaris, 'zonename' ) =~ /global/
# end
#
# @return [Array<Host>] Returns an array of hosts that are still valid
# targets for this tests case.
# @raise [SkipTest] Raises skip test if there are no valid hosts for
# this test case after confinement.
def confine(type, criteria, host_array = nil, &block)
provided_hosts = host_array ? true : false
hosts_to_modify = host_array || hosts
criteria.each_pair do |property, value|
case type
when :except
hosts_to_modify = hosts_to_modify.reject do |host|
inspect_host host, property, value
end
if block_given?
hosts_to_modify = hosts_to_modify.reject do |host|
yield host
end
end
when :to
hosts_to_modify = hosts_to_modify.select do |host|
inspect_host host, property, value
end
if block_given?
hosts_to_modify = hosts_to_modify.select do |host|
yield host
end
end
else
raise "Unknown option #{type}"
end
end
if hosts_to_modify.empty?
logger.warn "No suitable hosts with: #{criteria.inspect}"
skip_test 'No suitable hosts found'
end
self.hosts = hosts_to_modify
hosts_to_modify
end
      # Ensures that host restrictions as specified by type, criteria and
# host_array are confined to activity within the passed block.
# TestCase#hosts is reset after block has executed.
#
# @see #confine
def confine_block(type, criteria, host_array = nil, &block)
begin
original_hosts = self.hosts.dup
confine(type, criteria, host_array)
yield
ensure
self.hosts = original_hosts
end
end
# @!visibility private
def inspect_host(host, property, one_or_more_values)
values = Array(one_or_more_values)
return values.any? do |value|
true_false = false
case value
when String
true_false = host[property.to_s].include? value
when Regexp
true_false = host[property.to_s] =~ value
end
true_false
end
end
# Test Puppet running in a certain run mode with specific options.
# This ensures the following steps are performed:
# 1. The pre-test Puppet configuration is backed up
      #   2. A new Puppet configuration file is laid down
# 3. Puppet is started or restarted in the specified run mode
# 4. Ensure Puppet has started correctly
# 5. Further tests are yielded to
# 6. Revert Puppet to the pre-test state
# 7. Testing artifacts are saved in a folder named for the test
#
# @param [Host] host One object that act like Host
#
# @param [Hash{Symbol=>String}] conf_opts Represents puppet settings.
# Sections of the puppet.conf may be
# specified, if no section is specified the
      #                    specified, if no section is specified then
# options put in a section named after [mode]
#
# There is a special setting for command_line
# arguments such as --debug or --logdest, which
# cannot be set in puppet.conf. For example:
#
# :__commandline_args__ => '--logdest /tmp/a.log'
#
# These will only be applied when starting a FOSS
# master, as a pe master is just bounced.
#
# @param [File] testdir The temporary directory which will hold backup
# configuration, and other test artifacts.
#
# @param [Block] block The point of this method, yields so
# tests may be ran. After the block is finished
# puppet will revert to a previous state.
#
# @example A simple use case to ensure a master is running
# with_puppet_running_on( master ) do
# ...tests that require a master...
# end
#
      #   @example Fully utilizing the possibilities of config options
# with_puppet_running_on( master,
# :main => {:logdest => '/var/blah'},
# :master => {:masterlog => '/elswhere'},
# :agent => {:server => 'localhost'} ) do
#
# ...tests to be ran...
# end
#
# @api dsl
def with_puppet_running_on host, conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
raise(ArgumentError, "with_puppet_running_on's conf_opts must be a Hash. You provided a #{conf_opts.class}: '#{conf_opts}'") if !conf_opts.kind_of?(Hash)
cmdline_args = conf_opts.delete(:__commandline_args__)
begin
backup_file = backup_the_file(host, host['puppetpath'], testdir, 'puppet.conf')
lay_down_new_puppet_conf host, conf_opts, testdir
if host.is_pe?
bounce_service( host, 'pe-httpd' )
else
puppet_master_started = start_puppet_from_source_on!( host, cmdline_args )
end
yield self if block_given?
rescue Exception => early_exception
original_exception = RuntimeError.new("PuppetAcceptance::DSL::Helpers.with_puppet_running_on failed (check backtrace for location) because: #{early_exception}\n#{early_exception.backtrace.join("\n")}\n")
raise(original_exception)
ensure
begin
restore_puppet_conf_from_backup( host, backup_file )
if host.is_pe?
bounce_service( host, 'pe-httpd' )
else
if puppet_master_started
stop_puppet_from_source_on( host )
else
dump_puppet_log(host)
end
end
rescue Exception => teardown_exception
if original_exception
logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n")
raise original_exception
else
raise teardown_exception
end
end
end
end
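      # Illustrative sketch (mirrors the change proposed in the patch at the
      # top of this record; not part of the original file): keeping a
      # best-effort log dump from masking the failure that triggered teardown.
      # Any exception raised while dumping is rescued and logged on its own,
      # so the original_exception / teardown_exception handling is unaffected.
      #
      #   begin
      #     dump_puppet_log(host) unless host.is_pe?
      #   rescue Exception => dumping_exception
      #     logger.error("Raised during attempt to dump puppet logs: #{dumping_exception}")
      #   end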
# Test Puppet running in a certain run mode with specific options,
# on the default host
# @api dsl
# @see #with_puppet_running_on
def with_puppet_running conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
with_puppet_running_on(default, conf_opts, testdir, &block)
end
# @!visibility private
def restore_puppet_conf_from_backup( host, backup_file )
puppetpath = host['puppetpath']
puppet_conf = File.join(puppetpath, "puppet.conf")
if backup_file
host.exec( Command.new( "if [ -f '#{backup_file}' ]; then " +
"cat '#{backup_file}' > " +
"'#{puppet_conf}'; " +
"rm -f '#{backup_file}'; " +
"fi" ) )
else
host.exec( Command.new( "rm -f '#{puppet_conf}'" ))
end
end
# Back up the given file in the current_dir to the new_dir
#
# @!visibility private
#
# @param host [Beaker::Host] The target host
# @param current_dir [String] The directory containing the file to back up
# @param new_dir [String] The directory to copy the file to
# @param filename [String] The file to back up. Defaults to 'puppet.conf'
#
# @return [String, nil] The path to the file if the file exists, nil if it
# doesn't exist.
def backup_the_file host, current_dir, new_dir, filename = 'puppet.conf'
old_location = current_dir + '/' + filename
new_location = new_dir + '/' + filename + '.bak'
if host.file_exist? old_location
host.exec( Command.new( "cp #{old_location} #{new_location}" ) )
return new_location
else
logger.warn "Could not backup file '#{old_location}': no such file"
nil
end
end
# @!visibility private
def start_puppet_from_source_on! host, args = ''
host.exec( puppet( 'master', args ) )
logger.debug 'Waiting for the puppet master to start'
unless port_open_within?( host, 8140, 10 )
raise Beaker::DSL::FailTest, 'Puppet master did not start in a timely fashion'
end
logger.debug 'The puppet master has started'
return true
end
# @!visibility private
def stop_puppet_from_source_on( host )
pid = host.exec( Command.new('cat `puppet master --configprint pidfile`') ).stdout.chomp
host.exec( Command.new( "kill #{pid}" ) )
Timeout.timeout(10) do
while host.exec( Command.new( "kill -0 #{pid}"), :acceptable_exit_codes => [0,1] ).exit_code == 0 do
# until kill -0 finds no process and we know that puppet has finished cleaning up
sleep 1
end
end
end
# @!visibility private
def dump_puppet_log(host)
syslogfile = case host['platform']
when /fedora|centos|el/ then '/var/log/messages'
when /ubuntu|debian/ then '/var/log/syslog'
else return
end
logger.notify "\n*************************"
logger.notify "* Dumping master log *"
logger.notify "*************************"
host.exec( Command.new( "tail -n 100 #{syslogfile}" ), :acceptable_exit_codes => [0,1])
logger.notify "*************************\n"
end
# @!visibility private
def lay_down_new_puppet_conf( host, configuration_options, testdir )
new_conf = puppet_conf_for( host, configuration_options )
create_remote_file host, "#{testdir}/puppet.conf", new_conf.to_s
host.exec(
Command.new( "cat #{testdir}/puppet.conf > #{host['puppetpath']}/puppet.conf" ),
:silent => true
)
host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) )
end
# @!visibility private
def puppet_conf_for host, conf_opts
puppetconf = host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) ).stdout
new_conf = IniFile.new( puppetconf ).merge( conf_opts )
new_conf
end
# @!visibility private
def bounce_service host, service
# Any reason to not
# host.exec puppet_resource( 'service', service, 'ensure=stopped' )
# host.exec puppet_resource( 'service', service, 'ensure=running' )
host.exec( Command.new( "/etc/init.d/#{service} restart" ) )
end
# Blocks until the port is open on the host specified, returns false
# on failure
def port_open_within?( host, port = 8140, seconds = 120 )
repeat_for( seconds ) do
host.port_open?( port )
end
end
# Runs 'puppet apply' on a remote host, piping manifest through stdin
#
# @param [Host] host The host that this command should be run on
#
# @param [String] manifest The puppet manifest to apply
#
# @!macro common_opts
# @option opts [Boolean] :parseonly (false) If this key is true, the
# "--parseonly" command line parameter will
# be passed to the 'puppet apply' command.
#
# @option opts [Boolean] :trace (false) If this key exists in the Hash,
# the "--trace" command line parameter will be
# passed to the 'puppet apply' command.
#
# @option opts [Array<Integer>] :acceptable_exit_codes ([0]) The list of exit
# codes that will NOT raise an error when found upon
# command completion. If provided, these values will
# be combined with those used in :catch_failures and
# :expect_failures to create the full list of
# passing exit codes.
#
# @options opts [Hash] :environment Additional environment variables to be
# passed to the 'puppet apply' command
#
# @option opts [Boolean] :catch_failures (false) By default `puppet
# --apply` will exit with 0, which does not count
# as a test failure, even if there were errors or
# changes when applying the manifest. This option
# enables detailed exit codes and causes a test
# failure if `puppet --apply` indicates there was
# a failure during its execution.
#
# @option opts [Boolean] :catch_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# changes or failures during its execution.
#
# @option opts [Boolean] :expect_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# no resource changes during its execution.
#
# @option opts [Boolean] :expect_failures (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates there were no
      #                        failures during its execution.
#
# @param [Block] block This method will yield to a block of code passed
# by the caller; this can be used for additional
# validation, etc.
#
def apply_manifest_on(host, manifest, opts = {}, &block)
if host.is_a?(Array)
return host.map do |h|
apply_manifest_on(h, manifest, opts, &block)
end
end
on_options = {}
on_options[:acceptable_exit_codes] = Array(opts[:acceptable_exit_codes])
args = ["--verbose"]
args << "--parseonly" if opts[:parseonly]
args << "--trace" if opts[:trace]
# From puppet help:
# "... an exit code of '2' means there were changes, an exit code of
# '4' means there were failures during the transaction, and an exit
# code of '6' means there were both changes and failures."
if [opts[:catch_changes],opts[:catch_failures],opts[:expect_failures],opts[:expect_changes]].select{|x|x}.length > 1
raise(ArgumentError, "Cannot specify more than one of `catch_failures`, `catch_changes`, `expect_failures`, or `expect_changes` for a single manifest")
end
if opts[:catch_changes]
args << '--detailed-exitcodes'
# We're after idempotency so allow exit code 0 only.
on_options[:acceptable_exit_codes] |= [0]
elsif opts[:catch_failures]
args << '--detailed-exitcodes'
# We're after only complete success so allow exit codes 0 and 2 only.
on_options[:acceptable_exit_codes] |= [0, 2]
elsif opts[:expect_failures]
args << '--detailed-exitcodes'
# We're after failures specifically so allow exit codes 1, 4, and 6 only.
on_options[:acceptable_exit_codes] |= [1, 4, 6]
elsif opts[:expect_changes]
args << '--detailed-exitcodes'
# We're after changes specifically so allow exit code 2 only.
on_options[:acceptable_exit_codes] |= [2]
else
# Either use the provided acceptable_exit_codes or default to [0]
on_options[:acceptable_exit_codes] |= [0]
end
# Not really thrilled with this implementation, might want to improve it
# later. Basically, there is a magic trick in the constructor of
# PuppetCommand which allows you to pass in a Hash for the last value in
# the *args Array; if you do so, it will be treated specially. So, here
# we check to see if our caller passed us a hash of environment variables
# that they want to set for the puppet command. If so, we set the final
# value of *args to a new hash with just one entry (the value of which
# is our environment variables hash)
if opts.has_key?(:environment)
args << { :environment => opts[:environment]}
end
file_path = host.tmpfile('apply_manifest.pp')
create_remote_file(host, file_path, manifest + "\n")
args << file_path
on host, puppet( 'apply', *args), on_options, &block
end
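      # Illustrative usage sketch (agent and manifest are hypothetical; not
      # part of the original file): a common idempotency check built from the
      # options documented above. The first run must apply without failures,
      # the second must report no changes at all.
      #
      #   apply_manifest_on(agent, manifest, :catch_failures => true)
      #   apply_manifest_on(agent, manifest, :catch_changes => true)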
# Runs 'puppet apply' on default host, piping manifest through stdin
# @see #apply_manifest_on
def apply_manifest(manifest, opts = {}, &block)
apply_manifest_on(default, manifest, opts, &block)
end
# @deprecated
def run_agent_on(host, arg='--no-daemonize --verbose --onetime --test',
options={}, &block)
if host.is_a? Array
host.each { |h| run_agent_on h, arg, options, &block }
else
on host, puppet_agent(arg), options, &block
end
end
# FIX: this should be moved into host/platform
# @visibility private
def run_cron_on(host, action, user, entry="", &block)
platform = host['platform']
if platform.include?('solaris') || platform.include?('aix') then
case action
when :list then args = '-l'
when :remove then args = '-r'
when :add
on( host,
"echo '#{entry}' > /var/spool/cron/crontabs/#{user}",
&block )
end
else # default for GNU/Linux platforms
case action
when :list then args = '-l -u'
when :remove then args = '-r -u'
when :add
on( host,
"echo '#{entry}' > /tmp/#{user}.cron && " +
"crontab -u #{user} /tmp/#{user}.cron",
&block )
end
end
if args
case action
when :list, :remove then on(host, "crontab #{args} #{user}", &block)
end
end
end
# This method accepts a block and using the puppet resource 'host' will
# setup host aliases before and after that block.
#
# A teardown step is also added to make sure unstubbing of the host is
# removed always.
#
# @param machine [String] the host to execute this stub
# @param ip_spec [Hash{String=>String}] a hash containing the host to ip
# mappings
# @example Stub puppetlabs.com on the master to 127.0.0.1
# stub_hosts_on(master, 'puppetlabs.com' => '127.0.0.1')
def stub_hosts_on(machine, ip_spec)
ip_spec.each do |host, ip|
logger.notify("Stubbing host #{host} to IP #{ip} on machine #{machine}")
on( machine,
puppet('resource', 'host', host, 'ensure=present', "ip=#{ip}") )
end
teardown do
ip_spec.each do |host, ip|
logger.notify("Unstubbing host #{host} to IP #{ip} on machine #{machine}")
on( machine,
puppet('resource', 'host', host, 'ensure=absent') )
end
end
end
# This method accepts a block and using the puppet resource 'host' will
# setup host aliases before and after that block on the default host
#
# @example Stub puppetlabs.com on the default host to 127.0.0.1
# stub_hosts('puppetlabs.com' => '127.0.0.1')
# @see #stub_hosts_on
def stub_hosts(ip_spec)
stub_hosts_on(default, ip_spec)
end
# This wraps the method `stub_hosts_on` and makes the stub specific to
# the forge alias.
#
# forge api v1 canonical source is forge.puppetlabs.com
# forge api v3 canonical source is forgeapi.puppetlabs.com
#
# @param machine [String] the host to perform the stub on
def stub_forge_on(machine)
@forge_ip ||= Resolv.getaddress(forge)
stub_hosts_on(machine, 'forge.puppetlabs.com' => @forge_ip)
stub_hosts_on(machine, 'forgeapi.puppetlabs.com' => @forge_ip)
end
# This wraps the method `stub_hosts` and makes the stub specific to
# the forge alias.
#
# @see #stub_forge_on
def stub_forge
stub_forge_on(default)
end
def sleep_until_puppetdb_started(host)
curl_with_retries("start puppetdb", host, "http://localhost:8080", 0, 120)
curl_with_retries("start puppetdb (ssl)",
host, "https://#{host.node_name}:8081", [35, 60])
end
def curl_with_retries(desc, host, url, desired_exit_codes, max_retries = 60, retry_interval = 1)
retry_command(desc, host, "curl #{url}", desired_exit_codes, max_retries, retry_interval)
end
def retry_command(desc, host, command, desired_exit_codes = 0, max_retries = 60, retry_interval = 1)
desired_exit_codes = [desired_exit_codes].flatten
result = on host, command, :acceptable_exit_codes => (0...127)
num_retries = 0
until desired_exit_codes.include?(result.exit_code)
sleep retry_interval
result = on host, command, :acceptable_exit_codes => (0...127)
num_retries += 1
if (num_retries > max_retries)
fail("Unable to #{desc}")
end
end
end
#stops the puppet agent running on the host
def stop_agent_on(agent)
vardir = agent.puppet['vardir']
agent_running = true
while agent_running
result = on agent, "[ -e '#{vardir}/state/agent_catalog_run.lock' ]", :acceptable_exit_codes => [0,1]
agent_running = (result.exit_code == 0)
sleep 2 unless agent_running
end
if agent['platform'].include?('solaris')
on(agent, '/usr/sbin/svcadm disable -s svc:/network/pe-puppet:default')
elsif agent['platform'].include?('aix')
on(agent, '/usr/bin/stopsrc -s pe-puppet')
elsif agent['platform'].include?('windows')
on(agent, 'net stop pe-puppet', :acceptable_exit_codes => [0,2])
else
# For the sake of not passing the PE version into this method,
# we just query the system to find out which service we want to
# stop
result = on agent, "[ -e /etc/init.d/pe-puppet-agent ]", :acceptable_exit_codes => [0,1]
service = (result.exit_code == 0) ? 'pe-puppet-agent' : 'pe-puppet'
on(agent, "/etc/init.d/#{service} stop")
end
end
#stops the puppet agent running on the default host
# @see #stop_agent_on
def stop_agent
stop_agent_on(default)
end
#wait for a given host to appear in the dashboard
def wait_for_host_in_dashboard(host)
hostname = host.node_name
retry_command("Wait for #{hostname} to be in the console", dashboard, "! curl --sslv3 -k -I https://#{dashboard}/nodes/#{hostname} | grep '404 Not Found'")
end
# Ensure the host has requested a cert, then sign it
#
# @param [Host] host The host to sign for
#
# @return nil
# @raise [FailTest] if process times out
def sign_certificate_for(host)
if [master, dashboard, database].include? host
on host, puppet( 'agent -t' ), :acceptable_exit_codes => [0,1,2]
on master, puppet( "cert --allow-dns-alt-names sign #{host}" ), :acceptable_exit_codes => [0,24]
else
hostname = Regexp.escape host.node_name
last_sleep = 0
next_sleep = 1
(0..10).each do |i|
fail_test("Failed to sign cert for #{hostname}") if i == 10
on master, puppet("cert --sign --all"), :acceptable_exit_codes => [0,24]
break if on(master, puppet("cert --list --all")).stdout =~ /\+ "?#{hostname}"?/
sleep next_sleep
(last_sleep, next_sleep) = next_sleep, last_sleep+next_sleep
end
end
end
#prompt the master to sign certs then check to confirm the cert for the default host is signed
#@see #sign_certificate_for
def sign_certificate
sign_certificate_for(default)
end
# Get a facter fact from a provided host
#
# @param [Host] host The host to query the fact for
# @param [String] name The name of the fact to query for
# @!macro common_opts
#
# @return String The value of the fact 'name' on the provided host
# @raise [FailTest] Raises an exception if call to facter fails
def fact_on(host, name, opts = {})
result = on host, facter(name, opts)
result.stdout.chomp if result.stdout
end
# Get a facter fact from the default host
# @see #fact_on
def fact(name, opts = {})
fact_on(default, name, opts)
end
end
end
end
| 1 | 5,325 | We'd lose the data here from the original teardown_exception and only get the dump_exception - I fear that that could make the actual error difficult to track if it cascades. | voxpupuli-beaker | rb |
@@ -266,6 +266,9 @@ function diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChil
dom.innerHTML = newHtml && newHtml.__html || '';
}
}
+ if (newVNode.props.multiple && newVNode.type==='select') {
+ dom.multiple = newVNode.props.multiple;
+ }
diffChildren(dom, newVNode, oldVNode, context, newVNode.type==='foreignObject' ? false : isSvg, excessDomChildren, mounts, ancestorComponent);
diffProps(dom, newVNode.props, oldProps, isSvg);
} | 1 | import { EMPTY_OBJ, EMPTY_ARR } from '../constants';
import { Component, enqueueRender } from '../component';
import { coerceToVNode, Fragment } from '../create-element';
import { diffChildren } from './children';
import { diffProps } from './props';
import { assign } from '../util';
import options from '../options';
/**
* Diff two virtual nodes and apply proper changes to the DOM
* @param {import('../internal').PreactElement | Text} dom The DOM element representing
* the virtual nodes under diff
* @param {import('../internal').PreactElement} parentDom The parent of the DOM element
* @param {import('../internal').VNode | null} newVNode The new virtual node
* @param {import('../internal').VNode | null} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this element is an SVG node
* @param {Array<import('../internal').PreactElement>} excessDomChildren
* @param {Array<import('../internal').Component>} mounts A list of newly
* mounted components
* @param {import('../internal').Component | null} ancestorComponent The direct
* parent component
*/
export function diff(dom, parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent) {
// If the previous type doesn't match the new type we drop the whole subtree
if (oldVNode==null || newVNode==null || oldVNode.type!==newVNode.type) {
if (oldVNode!=null) unmount(oldVNode, ancestorComponent);
if (newVNode==null) return null;
dom = null;
oldVNode = EMPTY_OBJ;
}
if (options.diff) options.diff(newVNode);
let c, p, isNew = false, oldProps, oldState, oldContext,
newType = newVNode.type;
/** @type {import('../internal').Component | null} */
let clearProcessingException;
try {
outer: if (oldVNode.type===Fragment || newType===Fragment) {
diffChildren(parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, c);
if (newVNode._children.length) {
dom = newVNode._children[0]._dom;
newVNode._lastDomChild = newVNode._children[newVNode._children.length - 1]._dom;
}
}
else if (typeof newType==='function') {
// Necessary for createContext api. Setting this property will pass
// the context value as `this.context` just for this component.
let cxType = newType.contextType;
let provider = cxType && context[cxType._id];
let cctx = cxType != null ? (provider ? provider.props.value : cxType._defaultValue) : context;
// Get component and set it to `c`
if (oldVNode._component) {
c = newVNode._component = oldVNode._component;
clearProcessingException = c._processingException;
}
else {
isNew = true;
// Instantiate the new component
if (newType.prototype && newType.prototype.render) {
newVNode._component = c = new newType(newVNode.props, cctx); // eslint-disable-line new-cap
}
else {
newVNode._component = c = new Component(newVNode.props, cctx);
c.constructor = newType;
c.render = doRender;
}
c._ancestorComponent = ancestorComponent;
if (provider) provider.sub(c);
c.props = newVNode.props;
if (!c.state) c.state = {};
c.context = cctx;
c._context = context;
c._dirty = true;
c._renderCallbacks = [];
}
c._vnode = newVNode;
// Invoke getDerivedStateFromProps
let s = c._nextState || c.state;
if (newType.getDerivedStateFromProps!=null) {
oldState = assign({}, c.state);
if (s===c.state) s = assign({}, s);
assign(s, newType.getDerivedStateFromProps(newVNode.props, s));
}
// Invoke pre-render lifecycle methods
if (isNew) {
if (newType.getDerivedStateFromProps==null && c.componentWillMount!=null) c.componentWillMount();
if (c.componentDidMount!=null) mounts.push(c);
}
else {
if (newType.getDerivedStateFromProps==null && c._force==null && c.componentWillReceiveProps!=null) {
c.componentWillReceiveProps(newVNode.props, cctx);
s = c._nextState || c.state;
}
if (!c._force && c.shouldComponentUpdate!=null && c.shouldComponentUpdate(newVNode.props, s, cctx)===false) {
c.props = newVNode.props;
c.state = s;
c._dirty = false;
break outer;
}
if (c.componentWillUpdate!=null) {
c.componentWillUpdate(newVNode.props, s, cctx);
}
}
oldProps = c.props;
if (!oldState) oldState = c.state;
oldContext = c.context = cctx;
c.props = newVNode.props;
c.state = s;
if (options.render) options.render(newVNode);
let prev = c._prevVNode;
let vnode = c._prevVNode = coerceToVNode(c.render(c.props, c.state, c.context));
c._dirty = false;
if (c.getChildContext!=null) {
context = assign(assign({}, context), c.getChildContext());
}
if (!isNew && c.getSnapshotBeforeUpdate!=null) {
oldContext = c.getSnapshotBeforeUpdate(oldProps, oldState);
}
c.base = dom = diff(dom, parentDom, vnode, prev, context, isSvg, excessDomChildren, mounts, c);
if (vnode!=null) {
// If this component returns a Fragment (or another component that
// returns a Fragment), then _lastDomChild will be non-null,
// informing `diffChildren` to diff this component's VNode like a Fragemnt
newVNode._lastDomChild = vnode._lastDomChild;
}
c._parentDom = parentDom;
if (newVNode.ref) applyRef(newVNode.ref, c, ancestorComponent);
}
else {
dom = diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent);
if (newVNode.ref && (oldVNode.ref !== newVNode.ref)) {
applyRef(newVNode.ref, dom, ancestorComponent);
}
}
newVNode._dom = dom;
if (c!=null) {
while (p=c._renderCallbacks.pop()) p.call(c);
// Don't call componentDidUpdate on mount or when we bailed out via
// `shouldComponentUpdate`
if (!isNew && oldProps!=null && c.componentDidUpdate!=null) {
c.componentDidUpdate(oldProps, oldState, oldContext);
}
}
if (clearProcessingException) {
c._processingException = null;
}
if (options.diffed) options.diffed(newVNode);
}
catch (e) {
catchErrorInComponent(e, ancestorComponent);
}
return dom;
}
export function commitRoot(mounts, root) {
let c;
while ((c = mounts.pop())) {
try {
c.componentDidMount();
}
catch (e) {
catchErrorInComponent(e, c._ancestorComponent);
}
}
if (options.commit) options.commit(root);
}
/**
* Diff two virtual nodes representing DOM element
* @param {import('../internal').PreactElement} dom The DOM element representing
* the virtual nodes being diffed
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this DOM node is an SVG node
* @param {*} excessDomChildren
* @param {Array<import('../internal').Component>} mounts An array of newly
* mounted components
* @param {import('../internal').Component} ancestorComponent The parent
* component to the ones being diffed
* @returns {import('../internal').PreactElement}
*/
function diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent) {
let d = dom;
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvg = newVNode.type==='svg' || isSvg;
if (dom==null && excessDomChildren!=null) {
for (let i=0; i<excessDomChildren.length; i++) {
const child = excessDomChildren[i];
if (child!=null && (newVNode.type===null ? child.nodeType===3 : child.localName===newVNode.type)) {
dom = child;
excessDomChildren[i] = null;
break;
}
}
}
if (dom==null) {
dom = newVNode.type===null ? document.createTextNode(newVNode.text) : isSvg ? document.createElementNS('http://www.w3.org/2000/svg', newVNode.type) : document.createElement(newVNode.type);
// we created a new parent, so none of the previously attached children can be reused:
excessDomChildren = null;
}
newVNode._dom = dom;
if (newVNode.type===null) {
if ((d===null || dom===d) && newVNode.text!==oldVNode.text) {
dom.data = newVNode.text;
}
}
else {
if (excessDomChildren!=null && dom.childNodes!=null) {
excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes);
}
if (newVNode!==oldVNode) {
let oldProps = oldVNode.props;
// if we're hydrating, use the element's attributes as its current props:
if (oldProps==null) {
oldProps = {};
if (excessDomChildren!=null) {
for (let i=0; i<dom.attributes.length; i++) {
oldProps[dom.attributes[i].name] = dom.attributes[i].value;
}
}
}
let oldHtml = oldProps.dangerouslySetInnerHTML;
let newHtml = newVNode.props.dangerouslySetInnerHTML;
if (newHtml || oldHtml) {
// Avoid re-applying the same '__html' if it did not changed between re-render
if (!newHtml || !oldHtml || newHtml.__html!=oldHtml.__html) {
dom.innerHTML = newHtml && newHtml.__html || '';
}
}
diffChildren(dom, newVNode, oldVNode, context, newVNode.type==='foreignObject' ? false : isSvg, excessDomChildren, mounts, ancestorComponent);
diffProps(dom, newVNode.props, oldProps, isSvg);
}
}
return dom;
}
/**
* Invoke or update a ref, depending on whether it is a function or object ref.
* @param {object|function} [ref=null]
* @param {any} [value]
*/
export function applyRef(ref, value, ancestorComponent) {
try {
if (typeof ref=='function') ref(value);
else ref.current = value;
}
catch (e) {
catchErrorInComponent(e, ancestorComponent);
}
}
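// Illustrative sketch (node, domNode and component are hypothetical; not part
// of the original file): the two ref shapes applyRef accepts. A function ref
// is simply called with the value, while an object ref (e.g. one produced by
// createRef()) has its .current property assigned.
//
//   applyRef(node => { component._input = node; }, domNode, component); // function ref
//   applyRef({ current: null }, domNode, component);                    // object ref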
/**
* Unmount a virtual node from the tree and apply DOM changes
* @param {import('../internal').VNode} vnode The virtual node to unmount
* @param {import('../internal').Component} ancestorComponent The parent
* component to this virtual node
*/
export function unmount(vnode, ancestorComponent) {
let r;
if (options.unmount) options.unmount(vnode);
if (r = vnode.ref) {
applyRef(r, null, ancestorComponent);
}
if ((r = vnode._dom)!=null) r.remove();
vnode._dom = vnode._lastDomChild = null;
if ((r = vnode._component)!=null) {
if (r.componentWillUnmount) {
try {
r.componentWillUnmount();
}
catch (e) {
catchErrorInComponent(e, ancestorComponent);
}
}
r.base = r._parentDom = null;
if (r = r._prevVNode) unmount(r, ancestorComponent);
}
else if (r = vnode._children) {
for (let i = 0; i < r.length; i++) {
unmount(r[i], ancestorComponent);
}
}
}
/** The `.render()` method for a PFC backing instance. */
function doRender(props, state, context) {
return this.constructor(props, context);
}
/**
* Find the closest error boundary to a thrown error and call it
* @param {object} error The thrown value
* @param {import('../internal').Component} component The first ancestor
* component check for error boundary behaviors
*/
function catchErrorInComponent(error, component) {
for (; component; component = component._ancestorComponent) {
if (!component._processingException) {
try {
if (component.constructor.getDerivedStateFromError!=null) {
component.setState(component.constructor.getDerivedStateFromError(error));
}
else if (component.componentDidCatch!=null) {
component.componentDidCatch(error);
}
else {
continue;
}
return enqueueRender(component._processingException = component);
}
catch (e) {
error = e;
}
}
}
throw error;
}
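// Illustrative sketch (hypothetical component, not part of the original file):
// a minimal error boundary that the loop above would stop at. A component
// becomes a boundary by defining static getDerivedStateFromError and/or
// componentDidCatch; components with neither are skipped and the error keeps
// bubbling toward the root, where it is finally re-thrown.
//
//   class Boundary extends Component {
//     static getDerivedStateFromError(error) {
//       return { error };
//     }
//     render(props, state) {
//       return state.error ? 'something went wrong' : props.children;
//     }
//   }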
| 1 | 12,667 | At this point in `diff()` we should know if we're dealing with a `component` or a native `html` element. We can reuse that information in this if-statement here :+1: | preactjs-preact | js |
@@ -215,7 +215,7 @@ func (p *Protocol) GrantEpochReward(
}
// Reward additional bootstrap bonus
- if epochNum <= a.foundationBonusLastEpoch {
+ if epochNum <= a.foundationBonusLastEpoch || (epochNum >= a.foundationBonusP2StartEpoch && epochNum <= a.foundationBonusP2EndEpoch) {
for i, count := 0, uint64(0); i < len(candidates) && count < a.numDelegatesForFoundationBonus; i++ {
if _, ok := exemptAddrs[candidates[i].Address]; ok {
continue | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package rewarding
import (
"context"
"math/big"
"github.com/golang/protobuf/proto"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rewarding/rewardingpb"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/enc"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/state"
)
// rewardHistory is the dummy struct to record a reward. Only key matters.
type rewardHistory struct{}
// Serialize serializes reward history state into bytes
func (b rewardHistory) Serialize() ([]byte, error) {
gen := rewardingpb.RewardHistory{}
return proto.Marshal(&gen)
}
// Deserialize deserializes bytes into reward history state
func (b *rewardHistory) Deserialize(data []byte) error { return nil }
// rewardHistory stores the unclaimed balance of an account
type rewardAccount struct {
balance *big.Int
}
// Serialize serializes account state into bytes
func (a rewardAccount) Serialize() ([]byte, error) {
gen := rewardingpb.Account{
Balance: a.balance.String(),
}
return proto.Marshal(&gen)
}
// Deserialize deserializes bytes into account state
func (a *rewardAccount) Deserialize(data []byte) error {
gen := rewardingpb.Account{}
if err := proto.Unmarshal(data, &gen); err != nil {
return err
}
balance, ok := big.NewInt(0).SetString(gen.Balance, 10)
if !ok {
return errors.New("failed to set reward account balance")
}
a.balance = balance
return nil
}
// GrantBlockReward grants the block reward (token) to the block producer
func (p *Protocol) GrantBlockReward(
ctx context.Context,
sm protocol.StateManager,
) (*action.Log, error) {
actionCtx := protocol.MustGetActionCtx(ctx)
blkCtx := protocol.MustGetBlockCtx(ctx)
if err := p.assertNoRewardYet(sm, blockRewardHistoryKeyPrefix, blkCtx.BlockHeight); err != nil {
return nil, err
}
producerAddrStr := blkCtx.Producer.String()
rewardAddrStr := ""
pp := poll.FindProtocol(protocol.MustGetRegistry(ctx))
if pp != nil {
candidates, err := pp.Candidates(ctx, sm)
if err != nil {
return nil, err
}
for _, candidate := range candidates {
if candidate.Address == producerAddrStr {
rewardAddrStr = candidate.RewardAddress
break
}
}
}
// If reward address doesn't exist, do nothing
if rewardAddrStr == "" {
log.S().Debugf("Producer %s doesn't have a reward address", producerAddrStr)
return nil, nil
}
rewardAddr, err := address.FromString(rewardAddrStr)
a := admin{}
if err := p.state(sm, adminKey, &a); err != nil {
return nil, err
}
if err := p.updateAvailableBalance(sm, a.blockReward); err != nil {
return nil, err
}
if err != nil {
return nil, err
}
if err := p.grantToAccount(sm, rewardAddr, a.blockReward); err != nil {
return nil, err
}
if err := p.updateRewardHistory(sm, blockRewardHistoryKeyPrefix, blkCtx.BlockHeight); err != nil {
return nil, err
}
rewardLog := rewardingpb.RewardLog{
Type: rewardingpb.RewardLog_BLOCK_REWARD,
Addr: rewardAddrStr,
Amount: a.blockReward.String(),
}
data, err := proto.Marshal(&rewardLog)
if err != nil {
return nil, err
}
return &action.Log{
Address: p.addr.String(),
Topics: nil,
Data: data,
BlockHeight: blkCtx.BlockHeight,
ActionHash: actionCtx.ActionHash,
}, nil
}
// GrantEpochReward grants the epoch reward (token) to all beneficiaries of a epoch
func (p *Protocol) GrantEpochReward(
ctx context.Context,
sm protocol.StateManager,
) ([]*action.Log, error) {
actionCtx := protocol.MustGetActionCtx(ctx)
blkCtx := protocol.MustGetBlockCtx(ctx)
bcCtx := protocol.MustGetBlockchainCtx(ctx)
hu := config.NewHeightUpgrade(&bcCtx.Genesis)
rp := rolldpos.MustGetProtocol(protocol.MustGetRegistry(ctx))
epochNum := rp.GetEpochNum(blkCtx.BlockHeight)
if err := p.assertNoRewardYet(sm, epochRewardHistoryKeyPrefix, epochNum); err != nil {
return nil, err
}
if err := p.assertLastBlockInEpoch(blkCtx.BlockHeight, epochNum, rp); err != nil {
return nil, err
}
a := admin{}
if err := p.state(sm, adminKey, &a); err != nil {
return nil, err
}
// Get the delegate list who exempts epoch reward
e := exempt{}
if err := p.state(sm, exemptKey, &e); err != nil {
return nil, err
}
exemptAddrs := make(map[string]interface{})
for _, addr := range e.addrs {
exemptAddrs[addr.String()] = nil
}
var err error
uqd := make(map[string]bool)
epochStartHeight := rp.GetEpochHeight(epochNum)
if hu.IsPre(config.Easter, epochStartHeight) {
// Get unqualified delegate list
if uqd, err = p.unqualifiedDelegates(ctx, sm, rp, epochNum, a.productivityThreshold); err != nil {
return nil, err
}
}
candidates, err := poll.MustGetProtocol(protocol.MustGetRegistry(ctx)).Candidates(ctx, sm)
if err != nil {
return nil, err
}
addrs, amounts, err := p.splitEpochReward(epochStartHeight, sm, candidates, a.epochReward, a.numDelegatesForEpochReward, exemptAddrs, uqd)
if err != nil {
return nil, err
}
actualTotalReward := big.NewInt(0)
rewardLogs := make([]*action.Log, 0)
for i := range addrs {
// If reward address doesn't exist, do nothing
if addrs[i] == nil {
continue
}
// If 0 epoch reward due to low productivity, do nothing
if amounts[i].Cmp(big.NewInt(0)) == 0 {
continue
}
if err := p.grantToAccount(sm, addrs[i], amounts[i]); err != nil {
return nil, err
}
rewardLog := rewardingpb.RewardLog{
Type: rewardingpb.RewardLog_EPOCH_REWARD,
Addr: addrs[i].String(),
Amount: amounts[i].String(),
}
data, err := proto.Marshal(&rewardLog)
if err != nil {
return nil, err
}
rewardLogs = append(rewardLogs, &action.Log{
Address: p.addr.String(),
Topics: nil,
Data: data,
BlockHeight: blkCtx.BlockHeight,
ActionHash: actionCtx.ActionHash,
})
actualTotalReward = big.NewInt(0).Add(actualTotalReward, amounts[i])
}
// Reward additional bootstrap bonus
if epochNum <= a.foundationBonusLastEpoch {
for i, count := 0, uint64(0); i < len(candidates) && count < a.numDelegatesForFoundationBonus; i++ {
if _, ok := exemptAddrs[candidates[i].Address]; ok {
continue
}
if candidates[i].Votes.Cmp(big.NewInt(0)) == 0 {
// hard probation
continue
}
count++
// If reward address doesn't exist, do nothing
if candidates[i].RewardAddress == "" {
log.S().Warnf("Candidate %s doesn't have a reward address", candidates[i].Address)
continue
}
rewardAddr, err := address.FromString(candidates[i].RewardAddress)
if err != nil {
return nil, err
}
if err := p.grantToAccount(sm, rewardAddr, a.foundationBonus); err != nil {
return nil, err
}
rewardLog := rewardingpb.RewardLog{
Type: rewardingpb.RewardLog_FOUNDATION_BONUS,
Addr: candidates[i].RewardAddress,
Amount: a.foundationBonus.String(),
}
data, err := proto.Marshal(&rewardLog)
if err != nil {
return nil, err
}
rewardLogs = append(rewardLogs, &action.Log{
Address: p.addr.String(),
Topics: nil,
Data: data,
BlockHeight: blkCtx.BlockHeight,
ActionHash: actionCtx.ActionHash,
})
actualTotalReward = big.NewInt(0).Add(actualTotalReward, a.foundationBonus)
}
}
// Update actual reward
if err := p.updateAvailableBalance(sm, actualTotalReward); err != nil {
return nil, err
}
if err := p.updateRewardHistory(sm, epochRewardHistoryKeyPrefix, epochNum); err != nil {
return nil, err
}
return rewardLogs, nil
}
// Claim claims the token from the rewarding fund
func (p *Protocol) Claim(
ctx context.Context,
sm protocol.StateManager,
amount *big.Int,
) error {
actionCtx := protocol.MustGetActionCtx(ctx)
if err := p.assertAmount(amount); err != nil {
return err
}
if err := p.updateTotalBalance(sm, amount); err != nil {
return err
}
return p.claimFromAccount(sm, actionCtx.Caller, amount)
}
// UnclaimedBalance returns unclaimed balance of a given address
func (p *Protocol) UnclaimedBalance(
ctx context.Context,
sm protocol.StateReader,
addr address.Address,
) (*big.Int, error) {
acc := rewardAccount{}
accKey := append(adminKey, addr.Bytes()...)
err := p.state(sm, accKey, &acc)
if err == nil {
return acc.balance, nil
}
if errors.Cause(err) == state.ErrStateNotExist {
return big.NewInt(0), nil
}
return nil, err
}
func (p *Protocol) updateTotalBalance(sm protocol.StateManager, amount *big.Int) error {
f := fund{}
if err := p.state(sm, fundKey, &f); err != nil {
return err
}
totalBalance := big.NewInt(0).Sub(f.totalBalance, amount)
if totalBalance.Cmp(big.NewInt(0)) < 0 {
return errors.New("no enough total balance")
}
f.totalBalance = totalBalance
return p.putState(sm, fundKey, &f)
}
func (p *Protocol) updateAvailableBalance(sm protocol.StateManager, amount *big.Int) error {
f := fund{}
if err := p.state(sm, fundKey, &f); err != nil {
return err
}
availableBalance := big.NewInt(0).Sub(f.unclaimedBalance, amount)
if availableBalance.Cmp(big.NewInt(0)) < 0 {
return errors.New("no enough available balance")
}
f.unclaimedBalance = availableBalance
return p.putState(sm, fundKey, &f)
}
func (p *Protocol) grantToAccount(sm protocol.StateManager, addr address.Address, amount *big.Int) error {
acc := rewardAccount{}
accKey := append(adminKey, addr.Bytes()...)
if err := p.state(sm, accKey, &acc); err != nil {
if errors.Cause(err) != state.ErrStateNotExist {
return err
}
acc = rewardAccount{
balance: big.NewInt(0),
}
}
acc.balance = big.NewInt(0).Add(acc.balance, amount)
return p.putState(sm, accKey, &acc)
}
func (p *Protocol) claimFromAccount(sm protocol.StateManager, addr address.Address, amount *big.Int) error {
// Update reward account
acc := rewardAccount{}
accKey := append(adminKey, addr.Bytes()...)
if err := p.state(sm, accKey, &acc); err != nil {
return err
}
balance := big.NewInt(0).Sub(acc.balance, amount)
if balance.Cmp(big.NewInt(0)) < 0 {
return errors.New("no enough available balance")
}
// TODO: we may want to delete the account when the unclaimed balance becomes 0
acc.balance = balance
if err := p.putState(sm, accKey, &acc); err != nil {
return err
}
// Update primary account
primAcc, err := accountutil.LoadOrCreateAccount(sm, addr.String())
if err != nil {
return err
}
primAcc.Balance = big.NewInt(0).Add(primAcc.Balance, amount)
return accountutil.StoreAccount(sm, addr.String(), primAcc)
}
func (p *Protocol) updateRewardHistory(sm protocol.StateManager, prefix []byte, index uint64) error {
var indexBytes [8]byte
enc.MachineEndian.PutUint64(indexBytes[:], index)
return p.putState(sm, append(prefix, indexBytes[:]...), &rewardHistory{})
}
func (p *Protocol) splitEpochReward(
epochStartHeight uint64,
sm protocol.StateManager,
candidates []*state.Candidate,
totalAmount *big.Int,
numDelegatesForEpochReward uint64,
exemptAddrs map[string]interface{},
uqd map[string]bool,
) ([]address.Address, []*big.Int, error) {
filteredCandidates := make([]*state.Candidate, 0)
for _, candidate := range candidates {
if _, ok := exemptAddrs[candidate.Address]; ok {
continue
}
filteredCandidates = append(filteredCandidates, candidate)
}
candidates = filteredCandidates
if len(candidates) == 0 {
return nil, nil, nil
}
// We at most allow numDelegatesForEpochReward delegates to get the epoch reward
if uint64(len(candidates)) > numDelegatesForEpochReward {
candidates = candidates[:numDelegatesForEpochReward]
}
totalWeight := big.NewInt(0)
rewardAddrs := make([]address.Address, 0)
for _, candidate := range candidates {
var rewardAddr address.Address
var err error
if candidate.RewardAddress != "" {
rewardAddr, err = address.FromString(candidate.RewardAddress)
if err != nil {
return nil, nil, err
}
} else {
log.S().Warnf("Candidate %s doesn't have a reward address", candidate.Address)
}
rewardAddrs = append(rewardAddrs, rewardAddr)
totalWeight = big.NewInt(0).Add(totalWeight, candidate.Votes)
}
amounts := make([]*big.Int, 0)
var amountPerAddr *big.Int
for _, candidate := range candidates {
if totalWeight.Cmp(big.NewInt(0)) == 0 {
amounts = append(amounts, big.NewInt(0))
continue
}
if _, ok := uqd[candidate.Address]; ok {
// Before Easter, if not qualified, skip the epoch reward
amounts = append(amounts, big.NewInt(0))
continue
}
amountPerAddr = big.NewInt(0).Div(big.NewInt(0).Mul(totalAmount, candidate.Votes), totalWeight)
amounts = append(amounts, amountPerAddr)
}
return rewardAddrs, amounts, nil
}
func (p *Protocol) unqualifiedDelegates(
ctx context.Context,
sm protocol.StateManager,
rp *rolldpos.Protocol,
epochNum uint64,
productivityThreshold uint64,
) (map[string]bool, error) {
blkCtx := protocol.MustGetBlockCtx(ctx)
bcCtx := protocol.MustGetBlockchainCtx(ctx)
delegates, err := poll.MustGetProtocol(protocol.MustGetRegistry(ctx)).Delegates(ctx, sm)
if err != nil {
return nil, err
}
unqualifiedDelegates := make(map[string]bool, 0)
numBlks, produce, err := rp.ProductivityByEpoch(epochNum, bcCtx.Tip.Height, p.productivity)
if err != nil {
return nil, err
}
// The current block is not included, so add it
numBlks++
if _, ok := produce[blkCtx.Producer.String()]; ok {
produce[blkCtx.Producer.String()]++
} else {
produce[blkCtx.Producer.String()] = 1
}
for _, abp := range delegates {
if _, ok := produce[abp.Address]; !ok {
produce[abp.Address] = 0
}
}
expectedNumBlks := numBlks / uint64(len(produce))
for addr, actualNumBlks := range produce {
if actualNumBlks*100/expectedNumBlks < productivityThreshold {
unqualifiedDelegates[addr] = true
}
}
return unqualifiedDelegates, nil
}
func (p *Protocol) assertNoRewardYet(sm protocol.StateManager, prefix []byte, index uint64) error {
history := rewardHistory{}
var indexBytes [8]byte
enc.MachineEndian.PutUint64(indexBytes[:], index)
err := p.state(sm, append(prefix, indexBytes[:]...), &history)
if err == nil {
return errors.Errorf("reward history already exists on index %d", index)
}
if errors.Cause(err) != state.ErrStateNotExist {
return err
}
return nil
}
func (p *Protocol) assertLastBlockInEpoch(blkHeight uint64, epochNum uint64, rp *rolldpos.Protocol) error {
lastBlkHeight := rp.GetEpochLastBlockHeight(epochNum)
if blkHeight != lastBlkHeight {
return errors.Errorf("current block %d is not the last block of epoch %d", blkHeight, epochNum)
}
return nil
}
 | 1 | 21,703 | Somewhere in the reward protocol we need to do: if epoch is Fairbank { a.foundationBonusP2StartEpoch = genesis.xxx; a.foundationBonusP2EndEpoch = genesis.yyy; p.state(sm, adminKey, &a) }. That is, write the 2 new values at the Fairbank height; then this logic can follow. | iotexproject-iotex-core | go
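A hypothetical sketch of what that suggestion could look like, reusing the admin-state helpers already present in the file above (p.state, p.putState, adminKey). The helper name, its parameters, and hooking it in at the Fairbank upgrade height are assumptions, not the project's actual change; the foundationBonusP2 fields are taken from the patch.

// activateFoundationBonusP2 is a hypothetical helper, not part of the file above.
// It persists the phase-2 foundation bonus window (taken from genesis) into the
// admin state once the chain reaches the Fairbank upgrade height, so the grant
// logic in GrantEpochReward can read it afterwards.
func (p *Protocol) activateFoundationBonusP2(sm protocol.StateManager, startEpoch, endEpoch uint64) error {
	a := admin{}
	if err := p.state(sm, adminKey, &a); err != nil {
		return err
	}
	a.foundationBonusP2StartEpoch = startEpoch
	a.foundationBonusP2EndEpoch = endEpoch
	return p.putState(sm, adminKey, &a)
}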
@@ -200,6 +200,10 @@ if __name__ == '__main__':
inventory_dao.initialize(SQL_ENGINE)
scanner_dao.initialize(SQL_ENGINE)
+ # Drop and recreate the CloudAssetInventory table
+ inventory_dao.CaiTemporaryStore.__table__.drop(SQL_ENGINE)
+ inventory_dao.CaiTemporaryStore.__table__.create(SQL_ENGINE)
+
# Find all the child classes inherited from declarative base class.
SCANNER_DAO_CLASSES = _find_subclasses(scanner_dao.BASE)
| 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forseti db migrator."""
from __future__ import print_function
import sys
# Importing migrate.changeset adds some new methods to existing SQLAlchemy
# objects but we will not be calling the library directly.
import migrate.changeset # noqa: F401, pylint: disable=unused-import
from sqlalchemy.exc import OperationalError
import google.cloud.forseti.services.scanner.dao as scanner_dao
import google.cloud.forseti.services.inventory.storage as inventory_dao
import google.cloud.forseti.services.dao as general_dao
from google.cloud.forseti.common.util import logger
DEFAULT_DB_CONN_STR = 'mysql://[email protected]:3306/forseti_security'
LOGGER = logger.get_logger(__name__)
class ColumnAction(object):
"""Column action class."""
DROP = 'DROP'
CREATE = 'CREATE'
ALTER = 'ALTER'
def create_column(table, column):
"""Create Column.
Args:
table (sqlalchemy.schema.Table): The sql alchemy table object.
column (sqlalchemy.schema.Column): The sql alchemy column object.
"""
LOGGER.info('Attempting to create column: %s', column.name)
column.create(table, populate_default=True)
def alter_column(table, old_column, new_column):
"""Alter Column.
Args:
table (sqlalchemy.schema.Table): The sql alchemy table object.
old_column (sqlalchemy.schema.Column): The sql alchemy column object,
this is the column to be modified.
new_column (sqlalchemy.schema.Column): The sql alchemy column object,
this is the column to update to.
"""
LOGGER.info('Attempting to alter column: %s', old_column.name)
# bind the old column with the corresponding table.
old_column.table = table
old_column.alter(name=new_column.name,
type=new_column.type,
nullable=new_column.nullable)
def drop_column(table, column):
"""Create Column.
Args:
table (sqlalchemy.schema.Table): The sql alchemy table object.
column (sqlalchemy.schema.Column): The sql alchemy column object.
"""
LOGGER.info('Attempting to drop column: %s', column.name)
column.drop(table)
COLUMN_ACTION_MAPPING = {ColumnAction.DROP: drop_column,
ColumnAction.CREATE: create_column,
ColumnAction.ALTER: alter_column}
def migrate_schema(base, dao_classes):
"""Migrate database schema.
Args:
base (Base): Declarative base.
dao_classes (list): A list of dao classes.
"""
# Find all the Table objects for each of the classes.
# The format of tables is: {table_name: Table object}.
tables = base.metadata.tables
schema_update_actions_method = 'get_schema_update_actions'
for dao_class in dao_classes:
get_schema_update_actions = getattr(dao_class,
schema_update_actions_method,
None)
if (not callable(get_schema_update_actions) or
dao_class.__tablename__ not in tables):
LOGGER.warn('Method: %s is not callable or Table: %s doesn\'t '
'exist', schema_update_actions_method,
dao_class.__tablename__)
continue
LOGGER.info('Updating table %s', dao_class.__tablename__)
# schema_update will require the Table object.
table = tables.get(dao_class.__tablename__)
schema_update_actions = get_schema_update_actions()
for column_action, columns in schema_update_actions.iteritems():
if column_action in [ColumnAction.CREATE, ColumnAction.DROP]:
_create_or_drop_columns(column_action, columns, table)
elif column_action in [ColumnAction.ALTER]:
_alter_columns(column_action, columns, table)
else:
LOGGER.warn('Unknown column action: %s', column_action)
def _alter_columns(column_action, columns, table):
"""Alter columns.
Args:
column_action (str): Column Action.
columns (dict): A dictionary of old_column: new_column.
table (sqlalchemy.schema.Table): The sql alchemy table object.
"""
column_action = column_action.upper()
for old_column, new_column in columns.iteritems():
try:
COLUMN_ACTION_MAPPING.get(column_action)(table,
old_column,
new_column)
except OperationalError:
LOGGER.info('Failed to update db schema, table=%s',
table.name)
except Exception: # pylint: disable=broad-except
LOGGER.exception(
'Unexpected error happened when attempting '
'to update database schema, table: %s',
table.name)
def _create_or_drop_columns(column_action, columns, table):
"""Create or drop columns.
Args:
column_action (str): Column Action.
columns (list): A list of columns.
table (sqlalchemy.schema.Table): The sql alchemy table object.
"""
column_action = column_action.upper()
for column in columns:
try:
COLUMN_ACTION_MAPPING.get(column_action)(table,
column)
except OperationalError:
LOGGER.info('Failed to update db schema, table=%s',
table.name)
except Exception: # pylint: disable=broad-except
LOGGER.exception(
'Unexpected error happened when attempting '
'to update database schema, table: %s',
table.name)
def _find_subclasses(cls):
"""Find all the subclasses of a class.
Args:
cls (class): The parent class.
Returns:
list: Subclasses of the given parent class.
"""
results = []
for subclass in cls.__subclasses__():
results.append(subclass)
return results
if __name__ == '__main__':
# If the DB connection string is passed in, use that, otherwise
# fall back to the default DB connection string.
print (sys.argv)
DB_CONN_STR = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_DB_CONN_STR
SQL_ENGINE = general_dao.create_engine(DB_CONN_STR,
pool_recycle=3600)
# Create tables if not exists.
inventory_dao.initialize(SQL_ENGINE)
scanner_dao.initialize(SQL_ENGINE)
# Find all the child classes inherited from declarative base class.
SCANNER_DAO_CLASSES = _find_subclasses(scanner_dao.BASE)
INVENTORY_DAO_CLASSES = _find_subclasses(inventory_dao.BASE)
INVENTORY_DAO_CLASSES.extend([inventory_dao.CaiTemporaryStore])
DECLARITIVE_BASE_MAPPING = {
scanner_dao.BASE: SCANNER_DAO_CLASSES,
inventory_dao.BASE: INVENTORY_DAO_CLASSES}
for declaritive_base, classes in DECLARITIVE_BASE_MAPPING.iteritems():
declaritive_base.metadata.bind = SQL_ENGINE
migrate_schema(declaritive_base, classes)
 | 1 | 32,604 | Dropping and recreating would delete all the previous records; are any of them important? | forseti-security-forseti-security | py
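One non-destructive alternative implied by that question, sketched here rather than taken from the project: skip the drop and let SQLAlchemy create the table only when it is missing, so any existing CaiTemporaryStore rows survive the migration.

# Sketch only: keep existing CloudAssetInventory rows by creating the table
# only if it does not already exist (checkfirst performs the existence check).
inventory_dao.CaiTemporaryStore.__table__.create(SQL_ENGINE, checkfirst=True)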
@@ -50,6 +50,15 @@ func (m *mockCgroupManager) Destroy() error {
return nil
}
+func (m *mockCgroupManager) Exists() bool {
+ paths := m.GetPaths()
+ if paths != nil {
+ _, err := os.Lstat(paths["devices"])
+ return err == nil
+ }
+ return false
+}
+
func (m *mockCgroupManager) GetPaths() map[string]string {
return m.paths
} | 1 | // +build linux
package libcontainer
import (
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/libcontainer/system"
)
type mockCgroupManager struct {
pids []int
allPids []int
stats *cgroups.Stats
paths map[string]string
}
type mockIntelRdtManager struct {
stats *intelrdt.Stats
path string
}
func (m *mockCgroupManager) GetPids() ([]int, error) {
return m.pids, nil
}
func (m *mockCgroupManager) GetAllPids() ([]int, error) {
return m.allPids, nil
}
func (m *mockCgroupManager) GetStats() (*cgroups.Stats, error) {
return m.stats, nil
}
func (m *mockCgroupManager) Apply(pid int) error {
return nil
}
func (m *mockCgroupManager) Set(container *configs.Config) error {
return nil
}
func (m *mockCgroupManager) Destroy() error {
return nil
}
func (m *mockCgroupManager) GetPaths() map[string]string {
return m.paths
}
func (m *mockCgroupManager) Path(subsys string) string {
return m.paths[subsys]
}
func (m *mockCgroupManager) Freeze(state configs.FreezerState) error {
return nil
}
func (m *mockCgroupManager) GetCgroups() (*configs.Cgroup, error) {
return nil, nil
}
func (m *mockCgroupManager) GetFreezerState() (configs.FreezerState, error) {
return configs.Thawed, nil
}
func (m *mockIntelRdtManager) Apply(pid int) error {
return nil
}
func (m *mockIntelRdtManager) GetStats() (*intelrdt.Stats, error) {
return m.stats, nil
}
func (m *mockIntelRdtManager) Destroy() error {
return nil
}
func (m *mockIntelRdtManager) GetPath() string {
return m.path
}
func (m *mockIntelRdtManager) Set(container *configs.Config) error {
return nil
}
func (m *mockIntelRdtManager) GetCgroups() (*configs.Cgroup, error) {
return nil, nil
}
type mockProcess struct {
_pid int
started uint64
}
func (m *mockProcess) terminate() error {
return nil
}
func (m *mockProcess) pid() int {
return m._pid
}
func (m *mockProcess) startTime() (uint64, error) {
return m.started, nil
}
func (m *mockProcess) start() error {
return nil
}
func (m *mockProcess) wait() (*os.ProcessState, error) {
return nil, nil
}
func (m *mockProcess) signal(_ os.Signal) error {
return nil
}
func (m *mockProcess) externalDescriptors() []string {
return []string{}
}
func (m *mockProcess) setExternalDescriptors(newFds []string) {
}
func (m *mockProcess) forwardChildLogs() {
}
func TestGetContainerPids(t *testing.T) {
container := &linuxContainer{
id: "myid",
config: &configs.Config{},
cgroupManager: &mockCgroupManager{allPids: []int{1, 2, 3}},
}
pids, err := container.Processes()
if err != nil {
t.Fatal(err)
}
for i, expected := range []int{1, 2, 3} {
if pids[i] != expected {
t.Fatalf("expected pid %d but received %d", expected, pids[i])
}
}
}
func TestGetContainerStats(t *testing.T) {
container := &linuxContainer{
id: "myid",
config: &configs.Config{},
cgroupManager: &mockCgroupManager{
pids: []int{1, 2, 3},
stats: &cgroups.Stats{
MemoryStats: cgroups.MemoryStats{
Usage: cgroups.MemoryData{
Usage: 1024,
},
},
},
},
intelRdtManager: &mockIntelRdtManager{
stats: &intelrdt.Stats{
L3CacheSchema: "L3:0=f;1=f0",
MemBwSchema: "MB:0=20;1=70",
},
},
}
stats, err := container.Stats()
if err != nil {
t.Fatal(err)
}
if stats.CgroupStats == nil {
t.Fatal("cgroup stats are nil")
}
if stats.CgroupStats.MemoryStats.Usage.Usage != 1024 {
t.Fatalf("expected memory usage 1024 but received %d", stats.CgroupStats.MemoryStats.Usage.Usage)
}
if intelrdt.IsCatEnabled() {
if stats.IntelRdtStats == nil {
t.Fatal("intel rdt stats are nil")
}
if stats.IntelRdtStats.L3CacheSchema != "L3:0=f;1=f0" {
t.Fatalf("expected L3CacheSchema L3:0=f;1=f0 but received %s", stats.IntelRdtStats.L3CacheSchema)
}
}
if intelrdt.IsMbaEnabled() {
if stats.IntelRdtStats == nil {
t.Fatal("intel rdt stats are nil")
}
if stats.IntelRdtStats.MemBwSchema != "MB:0=20;1=70" {
t.Fatalf("expected MemBwSchema MB:0=20;1=70 but received %s", stats.IntelRdtStats.MemBwSchema)
}
}
}
func TestGetContainerState(t *testing.T) {
var (
pid = os.Getpid()
expectedMemoryPath = "/sys/fs/cgroup/memory/myid"
expectedNetworkPath = fmt.Sprintf("/proc/%d/ns/net", pid)
expectedIntelRdtPath = "/sys/fs/resctrl/myid"
)
container := &linuxContainer{
id: "myid",
config: &configs.Config{
Namespaces: []configs.Namespace{
{Type: configs.NEWPID},
{Type: configs.NEWNS},
{Type: configs.NEWNET, Path: expectedNetworkPath},
{Type: configs.NEWUTS},
// emulate host for IPC
//{Type: configs.NEWIPC},
{Type: configs.NEWCGROUP},
},
},
initProcess: &mockProcess{
_pid: pid,
started: 10,
},
cgroupManager: &mockCgroupManager{
pids: []int{1, 2, 3},
stats: &cgroups.Stats{
MemoryStats: cgroups.MemoryStats{
Usage: cgroups.MemoryData{
Usage: 1024,
},
},
},
paths: map[string]string{
"memory": expectedMemoryPath,
},
},
intelRdtManager: &mockIntelRdtManager{
stats: &intelrdt.Stats{
L3CacheSchema: "L3:0=f0;1=f",
MemBwSchema: "MB:0=70;1=20",
},
path: expectedIntelRdtPath,
},
}
container.state = &createdState{c: container}
state, err := container.State()
if err != nil {
t.Fatal(err)
}
if state.InitProcessPid != pid {
t.Fatalf("expected pid %d but received %d", pid, state.InitProcessPid)
}
if state.InitProcessStartTime != 10 {
t.Fatalf("expected process start time 10 but received %d", state.InitProcessStartTime)
}
paths := state.CgroupPaths
if paths == nil {
t.Fatal("cgroup paths should not be nil")
}
if memPath := paths["memory"]; memPath != expectedMemoryPath {
t.Fatalf("expected memory path %q but received %q", expectedMemoryPath, memPath)
}
if intelrdt.IsCatEnabled() || intelrdt.IsMbaEnabled() {
intelRdtPath := state.IntelRdtPath
if intelRdtPath == "" {
t.Fatal("intel rdt path should not be empty")
}
if intelRdtPath != expectedIntelRdtPath {
t.Fatalf("expected intel rdt path %q but received %q", expectedIntelRdtPath, intelRdtPath)
}
}
for _, ns := range container.config.Namespaces {
path := state.NamespacePaths[ns.Type]
if path == "" {
t.Fatalf("expected non nil namespace path for %s", ns.Type)
}
if ns.Type == configs.NEWNET {
if path != expectedNetworkPath {
t.Fatalf("expected path %q but received %q", expectedNetworkPath, path)
}
} else {
file := ""
switch ns.Type {
case configs.NEWNET:
file = "net"
case configs.NEWNS:
file = "mnt"
case configs.NEWPID:
file = "pid"
case configs.NEWIPC:
file = "ipc"
case configs.NEWUSER:
file = "user"
case configs.NEWUTS:
file = "uts"
case configs.NEWCGROUP:
file = "cgroup"
}
expected := fmt.Sprintf("/proc/%d/ns/%s", pid, file)
if expected != path {
t.Fatalf("expected path %q but received %q", expected, path)
}
}
}
}
func TestGetContainerStateAfterUpdate(t *testing.T) {
var (
pid = os.Getpid()
)
stat, err := system.Stat(pid)
if err != nil {
t.Fatal(err)
}
rootDir, err := ioutil.TempDir("", "TestGetContainerStateAfterUpdate")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootDir)
container := &linuxContainer{
root: rootDir,
id: "myid",
config: &configs.Config{
Namespaces: []configs.Namespace{
{Type: configs.NEWPID},
{Type: configs.NEWNS},
{Type: configs.NEWNET},
{Type: configs.NEWUTS},
{Type: configs.NEWIPC},
},
Cgroups: &configs.Cgroup{
Resources: &configs.Resources{
Memory: 1024,
},
},
},
initProcess: &mockProcess{
_pid: pid,
started: stat.StartTime,
},
cgroupManager: &mockCgroupManager{},
}
container.state = &createdState{c: container}
state, err := container.State()
if err != nil {
t.Fatal(err)
}
if state.InitProcessPid != pid {
t.Fatalf("expected pid %d but received %d", pid, state.InitProcessPid)
}
if state.InitProcessStartTime != stat.StartTime {
t.Fatalf("expected process start time %d but received %d", stat.StartTime, state.InitProcessStartTime)
}
if state.Config.Cgroups.Resources.Memory != 1024 {
t.Fatalf("expected Memory to be 1024 but received %q", state.Config.Cgroups.Memory)
}
// Set initProcessStartTime so we fake to be running
container.initProcessStartTime = state.InitProcessStartTime
container.state = &runningState{c: container}
newConfig := container.Config()
newConfig.Cgroups.Resources.Memory = 2048
if err := container.Set(newConfig); err != nil {
t.Fatal(err)
}
state, err = container.State()
if err != nil {
t.Fatal(err)
}
if state.Config.Cgroups.Resources.Memory != 2048 {
t.Fatalf("expected Memory to be 2048 but received %q", state.Config.Cgroups.Memory)
}
}
| 1 | 19,358 | While it is technically OK to use `m.GetPaths()` here, and it's a mock code so it doesn't really matter, I'd still like to have `m.Path("devices")` used here, because since commit 714c91e9f73a1512808476eb532b4aa36bbb7530 we're not supposed to use GetPaths() for anything other than state save/restore. | opencontainers-runc | go |
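A sketch of the mock's Exists() rewritten along the lines of this comment, going through the Path helper already defined on mockCgroupManager instead of GetPaths(). This illustrates the suggestion only; it is not necessarily the change that landed.

// Sketch: resolve the devices cgroup path via Path(), reserving GetPaths()
// for state save/restore as the reviewer notes.
func (m *mockCgroupManager) Exists() bool {
	_, err := os.Lstat(m.Path("devices"))
	return err == nil
}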