patch
stringlengths 17
31.2k
| y
int64 1
1
| oldf
stringlengths 0
2.21M
| idx
int64 1
1
| id
int64 4.29k
68.4k
| msg
stringlengths 8
843
| proj
stringclasses 212
values | lang
stringclasses 9
values |
---|---|---|---|---|---|---|---|
@@ -52,6 +52,13 @@ type ChaosCondition struct {
Reason string `json:"reason"`
}
+const (
+ Injecting string = "injecting"
+ Running string = "running"
+ Finished string = "finished"
+ Paused string = "paused"
+)
+
type DesiredPhase string
const ( | 1 | // Copyright 2019 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
)
const (
// PauseAnnotationKey defines the annotation used to pause a chaos
PauseAnnotationKey = "experiment.chaos-mesh.org/pause"
)
type ChaosStatus struct {
// Conditions represents the current global condition of the chaos
// +optional
Conditions []ChaosCondition `json:"conditions,omitempty"`
// Experiment records the last experiment state.
Experiment ExperimentStatus `json:"experiment"`
}
type ChaosConditionType string
const (
ConditionSelected ChaosConditionType = "Selected"
ConditionAllInjected ChaosConditionType = "AllInjected"
ConditionAllRecovered ChaosConditionType = "AllRecovered"
ConditionPaused ChaosConditionType = "Paused"
)
type ChaosCondition struct {
Type ChaosConditionType `json:"type"`
Status corev1.ConditionStatus `json:"status"`
// +optional
Reason string `json:"reason"`
}
type DesiredPhase string
const (
// The target of `RunningPhase` is to make all selected targets (container or pod) into "Injected" phase
RunningPhase DesiredPhase = "Run"
// The target of `StoppedPhase` is to make all selected targets (container or pod) into "NotInjected" phase
StoppedPhase DesiredPhase = "Stop"
)
type ExperimentStatus struct {
// +kubebuilder:validation:Enum=Run;Stop
DesiredPhase `json:"desiredPhase,omitempty"`
// +optional
// Records are used to track the running status
Records []*Record `json:"containerRecords,omitempty"`
}
type Record struct {
Id string `json:"id"`
SelectorKey string `json:"selectorKey"`
Phase Phase `json:"phase"`
}
type Phase string
const (
// NotInjected means the target is not injected yet. The controller could call "Inject" on the target
NotInjected Phase = "Not Injected"
// Injected means the target is injected. It's safe to recover it.
Injected Phase = "Injected"
)
var log = ctrl.Log.WithName("api")
// +kubebuilder:object:generate=false
// InnerObject is basic Object for the Reconciler
type InnerObject interface {
IsDeleted() bool
IsPaused() bool
GetChaos() *ChaosInstance
GetDuration() (*time.Duration, error)
DurationExceeded(time.Time) (bool, time.Duration, error)
StatefulObject
}
// +kubebuilder:object:generate=false
// StatefulObject defines a basic Object that can get the status
type StatefulObject interface {
runtime.Object
GetStatus() *ChaosStatus
GetObjectMeta() *metav1.ObjectMeta
}
// +kubebuilder:object:generate=false
// MetaObject defines a very basic Object that can get meta
type MetaObject interface {
runtime.Object
GetObjectMeta() *metav1.ObjectMeta
}
// +kubebuilder:object:generate=false
// ChaosInstance defines some common attribute for a chaos
type ChaosInstance struct {
Name string
Namespace string
Kind string
StartTime time.Time
EndTime time.Time
Action string
Duration string
Status string
UID string
}
// +kubebuilder:object:generate=false
// ChaosList defines a common interface for chaos lists
type ChaosList interface {
runtime.Object
ListChaos() []*ChaosInstance
}
| 1 | 22,362 | using a certain type instead of using string directly. | chaos-mesh-chaos-mesh | go |
@@ -761,7 +761,7 @@ func TestCrConflictMergedRenameSetMtimeFile(t *testing.T) {
)
}
-// alice and both both rename(the same file, causing a copy.),
+// alice and both both rename the same file, causing a copy.,
func TestCrConflictRenameSameFile(t *testing.T) {
test(t,
users("alice", "bob"), | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
// These tests all do multiple operations while a user is unstaged.
package test
import (
"testing"
"time"
)
// bob and alice both write(to the same file),
func TestCrConflictWriteFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkfile("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "world"),
),
as(bob, noSync(),
write("a/b", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "world"),
read(crname("a/b", bob), "uh oh"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "world"),
read(crname("a/b", bob), "uh oh"),
),
)
}
// bob and alice both create the same entry with different types
func TestCrConflictCreateWithDifferentTypes(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkdir("a"),
mkfile("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
mkdir("a/c"),
),
as(bob, noSync(),
mkfile("a/c", ""),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", "c$": "DIR",
crnameEsc("c", bob): "FILE"}),
read("a/b", "hello"),
lsdir("a/c", m{}),
read(crname("a/c", bob), ""),
),
as(alice,
lsdir("a/", m{"b$": "FILE", "c$": "DIR",
crnameEsc("c", bob): "FILE"}),
read("a/b", "hello"),
lsdir("a/c", m{}),
read(crname("a/c", bob), ""),
),
)
}
// bob and alice both create the same file with different types
func TestCrConflictCreateFileWithDifferentTypes(t *testing.T) {
test(t,
skip("dokan", "Does not work with Dokan."),
users("alice", "bob"),
as(alice,
mkdir("a"),
mkfile("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
mkfile("a/c", ""),
),
as(bob, noSync(),
link("a/c", "b"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", "c$": "FILE",
crnameEsc("c", bob): "SYM"}),
read("a/b", "hello"),
read("a/c", ""),
read(crname("a/c", bob), "hello"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", "c$": "FILE",
crnameEsc("c", bob): "SYM"}),
read("a/b", "hello"),
read("a/c", ""),
read(crname("a/c", bob), "hello"),
),
)
}
// bob and alice both create the same symlink with different contents
func TestCrConflictCreateSymlinkWithDifferentContents(t *testing.T) {
test(t,
skip("dokan", "Does not work with Dokan."),
users("alice", "bob"),
as(alice,
mkdir("a"),
mkfile("a/b", "hello"),
mkfile("a/c", "world"),
),
as(bob,
disableUpdates(),
),
as(alice,
link("a/d", "b"),
),
as(bob, noSync(),
link("a/d", "c"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", "c$": "FILE", "d$": "SYM",
crnameEsc("d", bob): "SYM"}),
read("a/b", "hello"),
read("a/c", "world"),
read("a/d", "hello"),
read(crname("a/d", bob), "world"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", "c$": "FILE", "d$": "SYM",
crnameEsc("d", bob): "SYM"}),
read("a/b", "hello"),
read("a/c", "world"),
read("a/d", "hello"),
read(crname("a/d", bob), "world"),
),
)
}
// bob and alice both write(to the same file), but on a non-default day.
func TestCrConflictWriteFileWithAddTime(t *testing.T) {
timeInc := 25 * time.Hour
test(t,
users("alice", "bob"),
as(alice,
mkfile("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
addTime(timeInc),
write("a/b", "world"),
),
as(bob, noSync(),
write("a/b", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE",
crnameAtTimeEsc("b", bob, timeInc): "FILE"}),
read("a/b", "world"),
read(crnameAtTime("a/b", bob, timeInc), "uh oh"),
),
as(alice,
lsdir("a/", m{"b$": "FILE",
crnameAtTimeEsc("b", bob, timeInc): "FILE"}),
read("a/b", "world"),
read(crnameAtTime("a/b", bob, timeInc), "uh oh"),
),
)
}
// bob and alice both write(to the same file),
func TestCrConflictWriteFileWithExtension(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkfile("a/foo.tar.gz", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/foo.tar.gz", "world"),
),
as(bob, noSync(),
write("a/foo.tar.gz", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"foo.tar.gz$": "FILE", crnameEsc("foo.tar.gz", bob): "FILE"}),
read("a/foo.tar.gz", "world"),
read(crname("a/foo.tar.gz", bob), "uh oh"),
),
as(alice,
lsdir("a/", m{"foo.tar.gz$": "FILE", crnameEsc("foo.tar.gz", bob): "FILE"}),
read("a/foo.tar.gz", "world"),
read(crname("a/foo.tar.gz", bob), "uh oh"),
),
)
}
// bob and alice both create the same file
func TestCrConflictCreateFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkdir("a"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "world"),
),
as(bob, noSync(),
write("a/b", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "world"),
read(crname("a/b", bob), "uh oh"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "world"),
read(crname("a/b", bob), "uh oh"),
),
)
}
// alice setattr's a file, while bob removes, recreates and writes to
// a file of the same name. Regression test for KBFS-668.
func TestCrConflictSetattrVsRecreatedFileInRoot(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkfile("a", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
setex("a", true),
),
as(bob, noSync(),
write("a", "uh oh"),
rm("a"),
mkfile("a", "world"),
reenableUpdates(),
lsdir("", m{"a$": "EXEC", crnameEsc("a", bob): "FILE"}),
read("a", "hello"),
read(crname("a", bob), "world"),
),
as(alice,
lsdir("", m{"a$": "EXEC", crnameEsc("a", bob): "FILE"}),
read("a", "hello"),
read(crname("a", bob), "world"),
),
)
}
// bob creates a directory with the same name that alice used for a file
func TestCrConflictCauseRenameOfMergedFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkdir("a"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "world"),
),
as(bob, noSync(),
write("a/b/c", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "DIR", crnameEsc("b", alice): "FILE"}),
read(crname("a/b", alice), "world"),
read("a/b/c", "uh oh"),
),
as(alice,
lsdir("a/", m{"b$": "DIR", crnameEsc("b", alice): "FILE"}),
read(crname("a/b", alice), "world"),
read("a/b/c", "uh oh"),
),
)
}
// bob creates a directory with the same name that alice used for a
// file that used to exist at that location
func TestCrConflictCauseRenameOfMergedRecreatedFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkdir("a"),
write("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "world"),
),
as(bob, noSync(),
rm("a/b"),
write("a/b/c", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "DIR", crnameEsc("b", alice): "FILE"}),
read(crname("a/b", alice), "world"),
read("a/b/c", "uh oh"),
),
as(alice,
lsdir("a/", m{"b$": "DIR", crnameEsc("b", alice): "FILE"}),
read(crname("a/b", alice), "world"),
read("a/b/c", "uh oh"),
),
)
}
// bob renames a file over one modified by alice.
func TestCrConflictUnmergedRenameFileOverModifiedFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
write("a/c", "world"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "uh oh"),
),
as(bob, noSync(),
rename("a/c", "a/b"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "uh oh"),
read(crname("a/b", bob), "world"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "uh oh"),
read(crname("a/b", bob), "world"),
),
)
}
// bob modifies and renames a file that was modified by alice.
func TestCrConflictUnmergedRenameModifiedFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "world"),
),
as(bob, noSync(),
write("a/b", "uh oh"),
rename("a/b", "a/c"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", "c$": "FILE"}),
read("a/b", "world"),
read("a/c", "uh oh"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", "c$": "FILE"}),
read("a/b", "world"),
read("a/c", "uh oh"),
),
)
}
// bob modifies and renames a file that was modified by alice, while
// alice also made a file with the new name.
func TestCrConflictUnmergedRenameModifiedFileAndConflictFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "world"),
mkfile("a/c", "CONFLICT"),
),
as(bob, noSync(),
write("a/b", "uh oh"),
rename("a/b", "a/c"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", "c$": "FILE", crnameEsc("c", bob): "FILE"}),
read("a/b", "world"),
read("a/c", "CONFLICT"),
read(crname("a/c", bob), "uh oh"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", "c$": "FILE", crnameEsc("c", bob): "FILE"}),
read("a/b", "world"),
read("a/c", "CONFLICT"),
read(crname("a/c", bob), "uh oh"),
),
)
}
// bob modifies and renames (to another dir) a file that was modified
// by alice.
func TestCrConflictUnmergedRenameAcrossDirsModifiedFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "world"),
),
as(bob, noSync(),
write("a/b", "uh oh"),
rename("a/b", "b/c"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE"}),
read("a/b", "world"),
lsdir("b/", m{"c$": "FILE"}),
read("b/c", "uh oh"),
),
as(alice,
lsdir("a/", m{"b$": "FILE"}),
read("a/b", "world"),
lsdir("b/", m{"c$": "FILE"}),
read("b/c", "uh oh"),
),
)
}
// bob sets the mtime on and renames a file that had its mtime set by alice.
func TestCrConflictUnmergedRenameSetMtimeFile(t *testing.T) {
targetMtime1 := time.Now().Add(1 * time.Minute)
targetMtime2 := targetMtime1.Add(1 * time.Minute)
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
setmtime("a/b", targetMtime1),
),
as(bob, noSync(),
setmtime("a/b", targetMtime2),
rename("a/b", "a/c"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", "c$": "FILE"}),
mtime("a/b", targetMtime1),
mtime("a/c", targetMtime2),
),
as(alice,
lsdir("a/", m{"b$": "FILE", "c$": "FILE"}),
mtime("a/b", targetMtime1),
mtime("a/c", targetMtime2),
),
)
}
// bob renames a file from a new directory over one modified by alice.
func TestCrConflictUnmergedRenameFileInNewDirOverModifiedFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
write("a/c", "world"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "uh oh"),
),
as(bob, noSync(),
rename("a/c", "e/c"),
rename("e/c", "a/b"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
lsdir("e/", m{}),
read("a/b", "uh oh"),
read(crname("a/b", bob), "world"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
lsdir("e/", m{}),
read("a/b", "uh oh"),
read(crname("a/b", bob), "world"),
),
)
}
// bob renames an existing directory over one created by alice.
// TODO: it would be better if this weren't a conflict.
func TestCrConflictUnmergedRenamedDir(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b/c", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/d/e", "world"),
),
as(bob, noSync(),
write("a/b/f", "uh oh"),
rename("a/b", "a/d"),
reenableUpdates(),
lsdir("a/", m{"d$": "DIR", crnameEsc("d", bob): "DIR"}),
lsdir("a/d", m{"e": "FILE"}),
lsdir(crname("a/d", bob), m{"c": "FILE", "f": "FILE"}),
read(crname("a/d", bob)+"/c", "hello"),
read("a/d/e", "world"),
read(crname("a/d", bob)+"/f", "uh oh"),
),
as(alice,
lsdir("a/", m{"d$": "DIR", crnameEsc("d", bob): "DIR"}),
lsdir("a/d", m{"e": "FILE"}),
lsdir(crname("a/d", bob), m{"c": "FILE", "f": "FILE"}),
read(crname("a/d", bob)+"/c", "hello"),
read("a/d/e", "world"),
read(crname("a/d", bob)+"/f", "uh oh"),
),
)
}
// bob renames a directory over one made non-empty by alice
func TestCrConflictUnmergedRenameDirOverNonemptyDir(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkdir("a/b"),
mkfile("a/c/d", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
mkfile("a/b/e", "uh oh"),
),
as(bob, noSync(),
rm("a/b"),
rename("a/c", "a/b"),
reenableUpdates(),
lsdir("a/", m{"b$": "DIR", crnameEsc("b", bob): "DIR"}),
lsdir("a/b", m{"e": "FILE"}),
lsdir(crname("a/b", bob), m{"d": "FILE"}),
),
as(alice,
lsdir("a/", m{"b$": "DIR", crnameEsc("b", bob): "DIR"}),
lsdir("a/b", m{"e": "FILE"}),
lsdir(crname("a/b", bob), m{"d": "FILE"}),
),
)
}
// alice renames an existing directory over one created by bob. TODO:
// it would be better if this weren't a conflict.
func TestCrConflictMergedRenamedDir(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b/c", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b/f", "uh oh"),
rename("a/b", "a/d"),
),
as(bob, noSync(),
write("a/d/e", "world"),
reenableUpdates(),
lsdir("a/", m{"d$": "DIR", crnameEsc("d", bob): "DIR"}),
lsdir("a/d", m{"c": "FILE", "f": "FILE"}),
read("a/d/c", "hello"),
read(crname("a/d", bob)+"/e", "world"),
read("a/d/f", "uh oh"),
),
as(alice,
lsdir("a/", m{"d$": "DIR", crnameEsc("d", bob): "DIR"}),
lsdir("a/d", m{"c": "FILE", "f": "FILE"}),
read("a/d/c", "hello"),
read(crname("a/d", bob)+"/e", "world"),
read("a/d/f", "uh oh"),
),
)
}
// alice renames a file over one modified by bob.
func TestCrConflictMergedRenameFileOverModifiedFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
write("a/c", "world"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/c", "a/b"),
),
as(bob, noSync(),
write("a/b", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "world"),
read(crname("a/b", bob), "uh oh"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "world"),
read(crname("a/b", bob), "uh oh"),
),
)
}
// alice modifies and renames a file that was modified by bob.
func TestCrConflictMergedRenameModifiedFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "world"),
rename("a/b", "a/c"),
),
as(bob, noSync(),
write("a/b", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", "c$": "FILE"}),
read("a/b", "uh oh"),
read("a/c", "world"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", "c$": "FILE"}),
read("a/b", "uh oh"),
read("a/c", "world"),
),
)
}
// alice modifies and renames a file that was modified by bob, while
// bob also made a file with the new name.
func TestCrConflictMergedRenameModifiedFileAndConflictFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "uh oh"),
rename("a/b", "a/c"),
),
as(bob, noSync(),
write("a/b", "world"),
mkfile("a/c", "CONFLICT"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", "c$": "FILE", crnameEsc("c", bob): "FILE"}),
read("a/b", "world"),
read("a/c", "uh oh"),
read(crname("a/c", bob), "CONFLICT"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", "c$": "FILE", crnameEsc("c", bob): "FILE"}),
read("a/b", "world"),
read("a/c", "uh oh"),
read(crname("a/c", bob), "CONFLICT"),
),
)
}
// alice modifies and renames (to another dir) a file that was modified
// by bob.
func TestCrConflictMergedRenameAcrossDirsModifiedFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "world"),
rename("a/b", "b/c"),
),
as(bob, noSync(),
write("a/b", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE"}),
read("a/b", "uh oh"),
lsdir("b/", m{"c$": "FILE"}),
read("b/c", "world"),
),
as(alice,
lsdir("a/", m{"b$": "FILE"}),
read("a/b", "uh oh"),
lsdir("b/", m{"c$": "FILE"}),
read("b/c", "world"),
),
)
}
// alice sets the mtime on and renames a file that had its mtime set by bob.
func TestCrConflictMergedRenameSetMtimeFile(t *testing.T) {
targetMtime1 := time.Now().Add(1 * time.Minute)
targetMtime2 := targetMtime1.Add(1 * time.Minute)
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
setmtime("a/b", targetMtime1),
rename("a/b", "a/c"),
),
as(bob, noSync(),
setmtime("a/b", targetMtime2),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", "c$": "FILE"}),
mtime("a/b", targetMtime2),
mtime("a/c", targetMtime1),
),
as(alice,
lsdir("a/", m{"b$": "FILE", "c$": "FILE"}),
mtime("a/b", targetMtime2),
mtime("a/c", targetMtime1),
),
)
}
// alice and both both rename(the same file, causing a copy.),
func TestCrConflictRenameSameFile(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/b", "a/c"),
),
as(bob, noSync(),
rename("a/b", "a/d"),
reenableUpdates(),
lsdir("a/", m{"c": "FILE", "d": "FILE"}),
read("a/c", "hello"),
read("a/d", "hello"),
),
as(alice,
lsdir("a/", m{"c": "FILE", "d": "FILE"}),
read("a/c", "hello"),
read("a/d", "hello"),
write("a/c", "world"),
),
as(bob,
read("a/c", "world"),
read("a/d", "hello"),
),
)
}
// alice and both both rename(the same executable file, causing a copy.),
func TestCrConflictRenameSameEx(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b", "hello"),
setex("a/b", true),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/b", "a/c"),
),
as(bob, noSync(),
rename("a/b", "a/d"),
reenableUpdates(),
lsdir("a/", m{"c": "EXEC", "d": "EXEC"}),
read("a/c", "hello"),
read("a/d", "hello"),
),
as(alice,
lsdir("a/", m{"c": "EXEC", "d": "EXEC"}),
read("a/c", "hello"),
read("a/d", "hello"),
write("a/c", "world"),
),
as(bob,
read("a/c", "world"),
read("a/d", "hello"),
),
)
}
// alice and both both rename(the same symlink.),
func TestCrConflictRenameSameSymlink(t *testing.T) {
test(t,
skip("dokan", "Does not work with Dokan."),
users("alice", "bob"),
as(alice,
write("a/foo", "hello"),
link("a/b", "foo"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/b", "a/c"),
),
as(bob, noSync(),
rename("a/b", "a/d"),
reenableUpdates(),
lsdir("a/", m{"foo": "FILE", "c": "SYM", "d": "SYM"}),
read("a/c", "hello"),
read("a/d", "hello"),
),
as(alice,
lsdir("a/", m{"foo": "FILE", "c": "SYM", "d": "SYM"}),
read("a/c", "hello"),
read("a/d", "hello"),
write("a/c", "world"),
),
as(bob,
read("a/c", "world"),
read("a/d", "world"),
),
)
}
// alice and bob both rename(the same directory, causing a symlink to),
// be created.
func TestCrConflictRenameSameDir(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b/c", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/b", "a/d"),
),
as(bob, noSync(),
rename("a/b", "a/e"),
reenableUpdates(),
lsdir("a/", m{"d": "DIR", "e": "SYM"}),
read("a/d/c", "hello"),
read("a/e/c", "hello"),
),
as(alice,
lsdir("a/", m{"d": "DIR", "e": "SYM"}),
read("a/d/c", "hello"),
read("a/e/c", "hello"),
write("a/d/f", "world"),
read("a/e/f", "world"),
),
as(bob,
read("a/e/f", "world"),
),
)
}
// alice and bob both rename(the same directory, causing a symlink to),
// be created.
func TestCrConflictRenameSameDirUpward(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b/c/d/e/foo", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/b/c/d/e", "a/e"),
),
as(bob, noSync(),
rename("a/b/c/d/e", "a/b/c/d/f"),
reenableUpdates(),
lsdir("a/", m{"b": "DIR", "e": "DIR"}),
lsdir("a/e", m{"foo": "FILE"}),
lsdir("a/b/c/d", m{"f": "SYM"}),
lsdir("a/b/c/d/f", m{"foo": "FILE"}),
read("a/e/foo", "hello"),
lsdir("a/b/c/d/f", m{"foo": "FILE"}),
),
as(alice,
lsdir("a/", m{"b": "DIR", "e": "DIR"}),
lsdir("a/e", m{"foo": "FILE"}),
lsdir("a/b/c/d", m{"f": "SYM"}),
lsdir("a/b/c/d/f", m{"foo": "FILE"}),
read("a/e/foo", "hello"),
lsdir("a/b/c/d/f", m{"foo": "FILE"}),
write("a/e/foo2", "world"),
),
as(bob,
read("a/b/c/d/f/foo2", "world"),
),
)
}
// alice and bob both rename(the same directory, causing a symlink to),
// be created.
func TestCrConflictRenameSameDirMergedUpward(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b/c/d/e/foo", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/b/c/d/e", "a/b/c/d/f"),
),
as(bob, noSync(),
rename("a/b/c/d/e", "a/e"),
reenableUpdates(),
lsdir("a/", m{"b": "DIR", "e": "SYM"}),
lsdir("a/e", m{"foo": "FILE"}),
lsdir("a/b/c/d", m{"f": "DIR"}),
lsdir("a/b/c/d/f", m{"foo": "FILE"}),
read("a/e/foo", "hello"),
lsdir("a/b/c/d/f", m{"foo": "FILE"}),
),
as(alice,
lsdir("a/", m{"b": "DIR", "e": "SYM"}),
lsdir("a/e", m{"foo": "FILE"}),
lsdir("a/b/c/d", m{"f": "DIR"}),
lsdir("a/b/c/d/f", m{"foo": "FILE"}),
read("a/e/foo", "hello"),
lsdir("a/b/c/d/f", m{"foo": "FILE"}),
write("a/e/foo2", "world"),
),
as(bob,
read("a/b/c/d/f/foo2", "world"),
),
)
}
func TestCrConflictRenameSameDirDownward(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b/foo", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/b", "a/c/d/e/f"),
),
as(bob, noSync(),
rename("a/b", "a/g"),
reenableUpdates(),
lsdir("a/", m{"c": "DIR", "g": "SYM"}),
lsdir("a/c/d/e/f", m{"foo": "FILE"}),
lsdir("a/g", m{"foo": "FILE"}),
read("a/c/d/e/f/foo", "hello"),
read("a/g/foo", "hello"),
),
as(alice,
lsdir("a/", m{"c": "DIR", "g": "SYM"}),
lsdir("a/c/d/e/f", m{"foo": "FILE"}),
lsdir("a/g", m{"foo": "FILE"}),
read("a/c/d/e/f/foo", "hello"),
read("a/g/foo", "hello"),
write("a/c/d/e/f/foo2", "world"),
),
as(bob,
read("a/g/foo2", "world"),
),
)
}
func TestCrConflictRenameSameDirSideways(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b/c/d/foo", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/b/c/d", "a/e/f/g"),
),
as(bob, noSync(),
rename("a/b/c/d", "a/b/c/h"),
reenableUpdates(),
lsdir("a/e/f", m{"g": "DIR"}),
lsdir("a/b/c", m{"h": "SYM"}),
lsdir("a/e/f/g", m{"foo": "FILE"}),
lsdir("a/b/c/h", m{"foo": "FILE"}),
read("a/e/f/g/foo", "hello"),
read("a/b/c/h/foo", "hello"),
),
as(alice,
lsdir("a/e/f", m{"g": "DIR"}),
lsdir("a/b/c", m{"h": "SYM"}),
lsdir("a/e/f/g", m{"foo": "FILE"}),
lsdir("a/b/c/h", m{"foo": "FILE"}),
read("a/e/f/g/foo", "hello"),
read("a/b/c/h/foo", "hello"),
write("a/e/f/g/foo2", "world"),
),
as(bob,
read("a/b/c/h/foo2", "world"),
),
)
}
// bob renames an existing directory over one created by alice, twice.
// TODO: it would be better if this weren't a conflict.
func TestCrConflictUnmergedRenamedDirDouble(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
write("a/b/c", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/d/e", "world"),
),
as(bob, noSync(),
write("a/b/f", "uh oh"),
rename("a/b", "a/d"),
reenableUpdates(),
lsdir("a/", m{"d$": "DIR", crnameEsc("d", bob): "DIR"}),
lsdir("a/d", m{"e": "FILE"}),
lsdir(crname("a/d", bob), m{"c": "FILE", "f": "FILE"}),
read(crname("a/d", bob)+"/c", "hello"),
read("a/d/e", "world"),
read(crname("a/d", bob)+"/f", "uh oh"),
),
as(alice,
lsdir("a/", m{"d$": "DIR", crnameEsc("d", bob): "DIR"}),
lsdir("a/d", m{"e": "FILE"}),
lsdir(crname("a/d", bob), m{"c": "FILE", "f": "FILE"}),
read(crname("a/d", bob)+"/c", "hello"),
read("a/d/e", "world"),
read(crname("a/d", bob)+"/f", "uh oh"),
rm("a/d/e"),
rm("a/d"),
write("a/b/c", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/d/e", "world"),
),
as(bob, noSync(),
write("a/b/f", "uh oh"),
rename("a/b", "a/d"),
reenableUpdates(),
lsdir("a/", m{"d$": "DIR", crnameEsc("d", bob) + "$": "DIR", crnameEsc("d", bob) + ` \(1\)`: "DIR"}),
lsdir("a/d", m{"e": "FILE"}),
lsdir(crname("a/d", bob)+" (1)", m{"c": "FILE", "f": "FILE"}),
read("a/d/e", "world"),
),
as(alice,
lsdir("a/", m{"d$": "DIR", crnameEsc("d", bob) + "$": "DIR", crnameEsc("d", bob) + ` \(1\)`: "DIR"}),
lsdir("a/d", m{"e": "FILE"}),
lsdir(crname("a/d", bob)+" (1)", m{"c": "FILE", "f": "FILE"}),
read("a/d/e", "world"),
),
)
}
// bob and alice both write(to the same file),
func TestCrConflictWriteFileDouble(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkfile("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "world"),
),
as(bob, noSync(),
write("a/b", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "world"),
read(crname("a/b", bob), "uh oh"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "world"),
read(crname("a/b", bob), "uh oh"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "another write"),
),
as(bob, noSync(),
write("a/b", "uh oh again!"),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob) + "$": "FILE", crnameEsc("b", bob) + ` \(1\)`: "FILE"}),
read("a/b", "another write"),
read(crname("a/b", bob), "uh oh"),
read(crname("a/b", bob)+" (1)", "uh oh again!"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob) + "$": "FILE", crnameEsc("b", bob) + ` \(1\)`: "FILE"}),
read("a/b", "another write"),
read(crname("a/b", bob), "uh oh"),
read(crname("a/b", bob)+" (1)", "uh oh again!"),
),
)
}
// bob and alice both write(to the same file),
func TestCrConflictWriteFileDoubleWithExtensions(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkfile("a/file.tar.gz", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/file.tar.gz", "world"),
),
as(bob, noSync(),
write("a/file.tar.gz", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"file.tar.gz$": "FILE", crnameEsc("file.tar.gz", bob): "FILE"}),
read("a/file.tar.gz", "world"),
read(crname("a/file.tar.gz", bob), "uh oh"),
),
as(alice,
lsdir("a/", m{"file.tar.gz$": "FILE", crnameEsc("file.tar.gz", bob): "FILE"}),
read("a/file.tar.gz", "world"),
read(crname("a/file.tar.gz", bob), "uh oh"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/file.tar.gz", "another write"),
),
as(bob, noSync(),
write("a/file.tar.gz", "uh oh again!"),
reenableUpdates(),
lsdir("a/", m{"file.tar.gz$": "FILE", crnameEsc("file.tar.gz", bob) + "$": "FILE", crnameEsc("file", bob) + ` \(1\).tar.gz`: "FILE"}),
read("a/file.tar.gz", "another write"),
read(crname("a/file.tar.gz", bob), "uh oh"),
read(crname("a/file", bob)+" (1).tar.gz", "uh oh again!"),
),
as(alice,
lsdir("a/", m{"file.tar.gz$": "FILE", crnameEsc("file.tar.gz", bob) + "$": "FILE", crnameEsc("file", bob) + ` \(1\).tar.gz`: "FILE"}),
read("a/file.tar.gz", "another write"),
read(crname("a/file.tar.gz", bob), "uh oh"),
read(crname("a/file", bob)+" (1).tar.gz", "uh oh again!"),
),
)
}
// bob causes a rename(cycle with a conflict while unstaged),
func TestCrRenameCycleWithConflict(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkdir("a"),
mkdir("a/b"),
mkdir("a/c"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/c", "a/b/c"),
),
as(bob, noSync(),
rename("a/b", "a/c/b"),
write("a/b", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "DIR", crnameEsc("b", bob): "FILE"}),
read(crname("a/b", bob), "uh oh"),
lsdir("a/b/", m{"c": "DIR"}),
lsdir("a/b/c", m{"b": "SYM"}),
lsdir("a/b/c/b", m{"c": "DIR"}),
),
as(alice,
lsdir("a/", m{"b$": "DIR", crnameEsc("b", bob): "FILE"}),
read(crname("a/b", bob), "uh oh"),
lsdir("a/b/", m{"c": "DIR"}),
lsdir("a/b/c", m{"b": "SYM"}),
lsdir("a/b/c/b", m{"c": "DIR"}),
write("a/b/d", "hello"),
),
as(bob,
read("a/b/c/b/d", "hello"),
),
)
}
// bob causes a rename(cycle with two conflicts while unstaged),
func TestCrRenameCycleWithTwoConflicts(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkdir("a"),
mkdir("a/b"),
mkdir("a/c"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/c", "a/b/c"),
write("a/b/c/b", "uh oh"),
),
as(bob, noSync(),
rename("a/b", "a/c/b"),
write("a/b", "double uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "DIR", crnameEsc("b", bob): "FILE"}),
read(crname("a/b", bob), "double uh oh"),
lsdir("a/b/", m{"c": "DIR"}),
lsdir("a/b/c", m{"b$": "SYM", crnameEsc("b", alice): "FILE"}),
lsdir("a/b/c/b", m{"c": "DIR"}),
),
as(alice,
lsdir("a/", m{"b$": "DIR", crnameEsc("b", bob): "FILE"}),
read(crname("a/b", bob), "double uh oh"),
lsdir("a/b/", m{"c": "DIR"}),
lsdir("a/b/c", m{"b$": "SYM", crnameEsc("b", alice): "FILE"}),
lsdir("a/b/c/b", m{"c": "DIR"}),
write("a/b/d", "hello"),
),
as(bob,
read("a/b/c/b/d", "hello"),
),
)
}
// bob causes a rename(cycle with two conflicts while unstaged),
func TestCrRenameCycleWithConflictAndMergedDir(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkdir("a"),
mkdir("a/b"),
mkdir("a/c"),
),
as(bob,
disableUpdates(),
),
as(alice,
rename("a/c", "a/b/c"),
mkdir("a/b/c/b"),
),
as(bob, noSync(),
rename("a/b", "a/c/b"),
write("a/b", "uh oh"),
reenableUpdates(),
lsdir("a/", m{"b$": "DIR", crnameEsc("b", bob): "FILE"}),
read(crname("a/b", bob), "uh oh"),
lsdir("a/b/", m{"c": "DIR"}),
lsdir("a/b/c", m{"b$": "DIR", crnameEsc("b", bob): "SYM"}),
lsdir(crname("a/b/c/b", bob), m{"c": "DIR"}),
lsdir("a/b/c/b", m{}),
),
as(alice,
lsdir("a/", m{"b$": "DIR", crnameEsc("b", bob): "FILE"}),
read(crname("a/b", bob), "uh oh"),
lsdir("a/b/", m{"c": "DIR"}),
lsdir("a/b/c", m{"b$": "DIR", crnameEsc("b", bob): "SYM"}),
lsdir(crname("a/b/c/b", bob), m{"c": "DIR"}),
lsdir("a/b/c/b", m{}),
write("a/b/d", "hello"),
),
as(bob,
read(crname("a/b/c/b", bob)+"/d", "hello"),
),
)
}
// alice and bob both truncate the same file to different sizes
func TestCrBothTruncateFileDifferentSizes(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkfile("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
truncate("a/b", 4),
),
as(bob, noSync(),
truncate("a/b", 3),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "hell"),
read(crname("a/b", bob), "hel"),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "hell"),
read(crname("a/b", bob), "hel"),
),
)
}
// alice and bob both truncate the same file to different sizes, after
// truncating to the same size
func TestCrBothTruncateFileDifferentSizesAfterSameSize(t *testing.T) {
test(t,
users("alice", "bob"),
as(alice,
mkfile("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
truncate("a/b", 0),
),
as(bob, noSync(),
truncate("a/b", 0),
truncate("a/b", 3),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", ""),
read(crname("a/b", bob), string(make([]byte, 3))),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", ""),
read(crname("a/b", bob), string(make([]byte, 3))),
),
)
}
// alice and bob both set the mtime on a file
func TestCrBothSetMtimeFile(t *testing.T) {
targetMtime1 := time.Now().Add(1 * time.Minute)
targetMtime2 := targetMtime1.Add(1 * time.Minute)
test(t,
users("alice", "bob"),
as(alice,
mkfile("a/b", "hello"),
),
as(bob,
disableUpdates(),
),
as(alice,
setmtime("a/b", targetMtime1),
),
as(bob, noSync(),
setmtime("a/b", targetMtime2),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
mtime("a/b", targetMtime1),
mtime(crname("a/b", bob), targetMtime2),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
mtime("a/b", targetMtime1),
mtime(crname("a/b", bob), targetMtime2),
),
)
}
// alice and bob both set the mtime on a dir
func TestCrBothSetMtimeDir(t *testing.T) {
targetMtime1 := time.Now().Add(1 * time.Minute)
targetMtime2 := targetMtime1.Add(1 * time.Minute)
test(t,
skip("dokan", "Dokan can't read mtimes on symlinks."),
users("alice", "bob"),
as(alice,
mkdir("a"),
),
as(bob,
disableUpdates(),
),
as(alice,
setmtime("a", targetMtime1),
),
as(bob, noSync(),
setmtime("a", targetMtime2),
reenableUpdates(),
lsdir("", m{"a$": "DIR", crnameEsc("a", bob): "SYM"}),
mtime("a", targetMtime1),
mtime(crname("a", bob), targetMtime2),
),
as(alice,
lsdir("", m{"a$": "DIR", crnameEsc("a", bob): "SYM"}),
mtime("a", targetMtime1),
mtime(crname("a", bob), targetMtime2),
),
)
}
| 1 | 16,625 | fix trailing , | keybase-kbfs | go |
@@ -16,7 +16,7 @@
<% end %>
... <%= t'trace.trace.ago', :time_in_words_ago => time_ago_in_words(trace.timestamp) %></span>
<%= link_to t('trace.trace.more'), {:controller => 'trace', :action => 'view', :display_name => trace.user.display_name, :id => trace.id}, {:title => t('trace.trace.trace_details')} %> /
- <%= link_to_if trace.inserted?, t('trace.trace.map'), {:controller => 'site', :action => 'index', :anchor => "map=14/#{trace.latitude}/#{trace.longitude}"}, {:title => t('trace.trace.view_map')} %> /
+ <%= link_to_if trace.inserted?, t('trace.trace.map'), {:controller => 'site', :action => 'index', :mlat => "#{trace.latitude}", :mlon => "#{trace.longitude}", :anchor => "map=14/#{trace.latitude}/#{trace.longitude}"}, {:title => t('trace.trace.view_map')} %> /
<%= link_to t('trace.trace.edit'), {:controller => 'site', :action => 'edit', :gpx => trace.id }, {:title => t('trace.trace.edit_map')} %>
<span class="trace_<%= trace.visibility %>"><%= t('trace.trace.' + trace.visibility) %></span>
<br /> | 1 | <tr>
<% cl = cycle('table0', 'table1') %>
<td class="<%= cl %>">
<% if STATUS != :gpx_offline %>
<% if trace.inserted %>
<a href="<%= url_for :controller => 'trace', :action => 'view', :id => trace.id, :display_name => trace.user.display_name %>"><img src="<%= url_for :controller => 'trace', :action => 'icon', :id => trace.id, :display_name => trace.user.display_name %>" border="0" alt="" /></a>
<% else %>
<span class="trace_pending"><%= t'trace.trace.pending' %></span>
<% end %>
<% end %>
</td>
<td class="<%= cl %>"><%= link_to trace.name, {:controller => 'trace', :action => 'view', :display_name => trace.user.display_name, :id => trace.id} %>
<span class="trace_summary" title="<%= trace.timestamp %>"> ...
<% if trace.inserted %>
(<%= t'trace.trace.count_points', :count => trace.size.to_s.gsub(/(\d)(?=(\d{3})+$)/,'\1,') %>)
<% end %>
... <%= t'trace.trace.ago', :time_in_words_ago => time_ago_in_words(trace.timestamp) %></span>
<%= link_to t('trace.trace.more'), {:controller => 'trace', :action => 'view', :display_name => trace.user.display_name, :id => trace.id}, {:title => t('trace.trace.trace_details')} %> /
<%= link_to_if trace.inserted?, t('trace.trace.map'), {:controller => 'site', :action => 'index', :anchor => "map=14/#{trace.latitude}/#{trace.longitude}"}, {:title => t('trace.trace.view_map')} %> /
<%= link_to t('trace.trace.edit'), {:controller => 'site', :action => 'edit', :gpx => trace.id }, {:title => t('trace.trace.edit_map')} %>
<span class="trace_<%= trace.visibility %>"><%= t('trace.trace.' + trace.visibility) %></span>
<br />
<%= trace.description %>
<br />
<%= t'trace.trace.by' %> <%=link_to h(trace.user.display_name), {:controller => 'user', :action => 'view', :display_name => trace.user.display_name} %>
<% if !trace.tags.empty? %>
<%= t'trace.trace.in' %>
<%= raw(trace.tags.collect { |tag| link_to_tag tag.tag }.join(", ")) %>
<% end %>
</td>
</tr>
| 1 | 11,034 | I've fixed it for this change but for future reference there's no point doing string substitution for a single value like that - it will just slow things down. Just use `:mlat => trace.latitude` etc instead. | openstreetmap-openstreetmap-website | rb |
@@ -10,11 +10,7 @@ namespace Ergonode\Product\Application\Model\Product\Attribute\Update;
use Symfony\Component\Validator\Constraints as Assert;
use Ergonode\Attribute\Application\Validator as AttributeAssert;
-use Ergonode\Product\Application\Validator as ProductAssert;
-/**
- * @ProductAssert\ProductAttribute()
- */
class UpdateAttributeValueFormModel
{
/** | 1 | <?php
/**
* Copyright © Ergonode Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\Product\Application\Model\Product\Attribute\Update;
use Symfony\Component\Validator\Constraints as Assert;
use Ergonode\Attribute\Application\Validator as AttributeAssert;
use Ergonode\Product\Application\Validator as ProductAssert;
/**
* @ProductAssert\ProductAttribute()
*/
class UpdateAttributeValueFormModel
{
/**
* @Assert\NotBlank()
* @Assert\Uuid(strict=true)
* @AttributeAssert\AttributeExists()
*/
public ?string $id = null;
/**
* @Assert\Valid()
*
* @var UpdateAttributeValueTranslationFormModel[]
*/
public array $values = [];
}
| 1 | 9,722 | The form no logger needs validation? | ergonode-backend | php |
@@ -42,7 +42,7 @@ namespace OpenTelemetry.Metrics
if (this.getMetrics != null)
{
var metricsToExport = this.getMetrics(this.isDelta);
- if (metricsToExport != null)
+ if (metricsToExport != null && metricsToExport.Metrics.Count > 0)
{
Batch<MetricItem> batch = new Batch<MetricItem>(metricsToExport);
this.exporter.Export(batch); | 1 | // <copyright file="PullMetricProcessor.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Threading;
using System.Threading.Tasks;
namespace OpenTelemetry.Metrics
{
public class PullMetricProcessor : MetricProcessor, IDisposable
{
private Func<bool, MetricItem> getMetrics;
private bool disposed;
private bool isDelta;
public PullMetricProcessor(BaseExporter<MetricItem> exporter, bool isDelta)
: base(exporter)
{
this.isDelta = isDelta;
}
public override void SetGetMetricFunction(Func<bool, MetricItem> getMetrics)
{
this.getMetrics = getMetrics;
}
public void PullRequest()
{
if (this.getMetrics != null)
{
var metricsToExport = this.getMetrics(this.isDelta);
if (metricsToExport != null)
{
Batch<MetricItem> batch = new Batch<MetricItem>(metricsToExport);
this.exporter.Export(batch);
}
}
}
/// <inheritdoc/>
protected override void Dispose(bool disposing)
{
base.Dispose(disposing);
if (disposing && !this.disposed)
{
try
{
this.exporter.Dispose();
}
catch (Exception)
{
// TODO: Log
}
this.disposed = true;
}
}
}
}
| 1 | 20,732 | there'd be more changes in the area, when we implement "clean-up" of aggregators if no new updates. (i think we currently send metricitem to exporters with zero as value) | open-telemetry-opentelemetry-dotnet | .cs |
@@ -52,7 +52,9 @@ class CollectionViewTest(BaseWebTest, unittest.TestCase):
self.app.put_json('/buckets/sodas',
MINIMALIST_BUCKET,
headers=self.headers)
- self.app.get(other_bucket, headers=self.headers, status=404)
+ resp = self.app.get(other_bucket, headers=self.headers, status=404)
+ self.assertIn('id', resp.json['details'])
+ self.assertIn('resource_name', resp.json['details'])
def test_create_permissions_can_be_added_on_collections(self):
collection = MINIMALIST_COLLECTION.copy() | 1 | import unittest
from kinto.core.testing import get_user_headers
from .support import (BaseWebTest, MINIMALIST_BUCKET,
MINIMALIST_COLLECTION, MINIMALIST_RECORD)
class CollectionViewTest(BaseWebTest, unittest.TestCase):
collections_url = '/buckets/beers/collections'
collection_url = '/buckets/beers/collections/barley'
def setUp(self):
super(CollectionViewTest, self).setUp()
self.app.put_json('/buckets/beers', MINIMALIST_BUCKET,
headers=self.headers)
resp = self.app.put_json(self.collection_url,
MINIMALIST_COLLECTION,
headers=self.headers)
self.record = resp.json['data']
def test_collection_endpoint_lists_them_all(self):
resp = self.app.get(self.collections_url, headers=self.headers)
records = resp.json['data']
self.assertEqual(len(records), 1)
self.assertEqual(records[0]['id'], 'barley')
def test_collections_can_be_put_with_simple_name(self):
self.assertEqual(self.record['id'], 'barley')
def test_collections_name_should_be_simple(self):
self.app.put_json('/buckets/beers/collections/__barley__',
MINIMALIST_COLLECTION,
headers=self.headers,
status=400)
def test_collections_should_reject_unaccepted_request_content_type(self):
headers = self.headers.copy()
headers['Content-Type'] = 'text/plain'
self.app.put('/buckets/beers/collections/barley',
MINIMALIST_COLLECTION,
headers=headers,
status=415)
def test_unknown_bucket_raises_403(self):
other_bucket = self.collections_url.replace('beers', 'sodas')
self.app.get(other_bucket, headers=self.headers, status=403)
def test_collections_are_isolated_by_bucket(self):
other_bucket = self.collection_url.replace('beers', 'sodas')
self.app.put_json('/buckets/sodas',
MINIMALIST_BUCKET,
headers=self.headers)
self.app.get(other_bucket, headers=self.headers, status=404)
def test_create_permissions_can_be_added_on_collections(self):
collection = MINIMALIST_COLLECTION.copy()
collection['permissions'] = {'record:create': ['fxa:user']}
resp = self.app.put_json('/buckets/beers/collections/barley',
collection,
headers=self.headers,
status=200)
permissions = resp.json['permissions']
self.assertIn('fxa:user', permissions['record:create'])
def test_wrong_create_permissions_cannot_be_added_on_collections(self):
collection = MINIMALIST_COLLECTION.copy()
collection['permissions'] = {'collection:create': ['fxa:user']}
self.app.put_json('/buckets/beers/collections/barley',
collection,
headers=self.headers,
status=400)
def test_collections_can_handle_arbitrary_attributes(self):
collection = MINIMALIST_COLLECTION.copy()
fingerprint = "5866f245a00bb3a39100d31b2f14d453"
collection['data'] = {'fingerprint': fingerprint}
resp = self.app.put_json('/buckets/beers/collections/barley',
collection,
headers=self.headers,
status=200)
data = resp.json['data']
self.assertIn('fingerprint', data)
self.assertEqual(data['fingerprint'], fingerprint)
def test_collections_can_be_filtered_by_arbitrary_attribute(self):
collection = MINIMALIST_COLLECTION.copy()
collection['data'] = {'size': 3}
self.app.put_json('/buckets/beers/collections/moderator',
collection,
headers=self.headers)
resp = self.app.get('/buckets/beers/collections?min_size=2',
headers=self.headers)
data = resp.json['data']
self.assertEqual(len(data), 1)
class CollectionDeletionTest(BaseWebTest, unittest.TestCase):
collection_url = '/buckets/beers/collections/barley'
def setUp(self):
super(CollectionDeletionTest, self).setUp()
bucket = MINIMALIST_BUCKET.copy()
bucket['permissions'] = {'collection:create': ['system.Everyone'],
'read': ['system.Everyone']}
self.app.put_json('/buckets/beers', bucket,
headers=self.headers)
self.app.put_json(self.collection_url, MINIMALIST_COLLECTION,
headers=self.headers)
r = self.app.post_json(self.collection_url + '/records',
MINIMALIST_RECORD,
headers=self.headers)
record_id = r.json['data']['id']
self.record_url = self.collection_url + '/records/%s' % record_id
self.app.delete(self.collection_url, headers=self.headers)
def test_collections_can_be_deleted(self):
self.app.get(self.collection_url, headers=self.headers,
status=404)
def test_collections_can_be_deleted_in_bulk(self):
alice_headers = get_user_headers('alice')
self.app.put_json('/buckets/beers/collections/1',
MINIMALIST_COLLECTION, headers=self.headers)
self.app.put_json('/buckets/beers/collections/2',
MINIMALIST_COLLECTION, headers=alice_headers)
self.app.put_json('/buckets/beers/collections/3',
MINIMALIST_COLLECTION, headers=alice_headers)
self.app.delete('/buckets/beers/collections',
headers=alice_headers)
resp = self.app.get('/buckets/beers/collections', headers=self.headers)
self.assertEqual(len(resp.json['data']), 1)
def test_records_of_collection_are_deleted_too(self):
self.app.put_json(self.collection_url, MINIMALIST_COLLECTION,
headers=self.headers)
self.app.get(self.record_url, headers=self.headers, status=404)
# Verify tombstones
resp = self.app.get('%s/records?_since=0' % self.collection_url,
headers=self.headers)
self.assertEqual(len(resp.json['data']), 0)
def test_can_be_created_after_deletion_with_if_none_match_star(self):
headers = self.headers.copy()
headers['If-None-Match'] = '*'
self.app.put_json(self.collection_url, MINIMALIST_COLLECTION,
headers=headers, status=201)
class CollectionCreationTest(BaseWebTest, unittest.TestCase):
collections_url = '/buckets/beers/collections'
def setUp(self):
super(CollectionCreationTest, self).setUp()
self.app.put_json('/buckets/beers', MINIMALIST_BUCKET,
headers=self.headers)
def test_collection_can_be_created_with_post(self):
r = self.app.post_json(self.collections_url,
MINIMALIST_COLLECTION,
headers=self.headers)
self.assertEqual(r.status_code, 201)
self.assertTrue(len(r.json['data']['id']) == 8)
def test_collection_can_be_specified_in_post(self):
collection = 'barley'
r = self.app.post_json(self.collections_url,
{'data': {'id': collection}},
headers=self.headers)
self.assertEqual(r.status_code, 201)
self.assertEqual(r.json['data']['id'], collection)
def test_collection_already_exists_post(self):
collection = "barley"
self.app.post_json(self.collections_url,
{'data': {'id': collection}},
headers=self.headers)
r = self.app.post_json(self.collections_url,
{'data': {'id': collection}},
headers=self.headers)
self.assertEqual(r.json['data']['id'], collection)
self.assertEqual(r.status_code, 200)
| 1 | 9,982 | Usually we only keep the assertions that are relevant to the specification (ie. the test title). For example, the test on the values of id and details is not properly relevant for the spec _collections are isolated by bucket_. I suggest that you only keep the assertions regarding `details` in dedicated tests similar to `test_unknown_collection_raises_404` from `test_views_records.py`, but for each of bucket, group, collection and record in the other `test_views_` files. | Kinto-kinto | py |
@@ -28,7 +28,7 @@ public class FieldAccessGenericParameter<T extends GenericClass<String, GenericC
<C extends Number> FieldAccessGenericParameter() {
C constructorGeneric = null;
- // access type dependant on constructor type arugments
+ // access type dependant on constructor type arguments
// Primary[Prefix[Name[localGeneric]]]
constructorGeneric = null; // Number
} | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.typeresolution.testdata;
import net.sourceforge.pmd.typeresolution.testdata.dummytypes.GenericClass;
public class FieldAccessGenericParameter<T extends GenericClass<String, GenericClass<String, Integer>>,
S extends Double> {
T parameterGeneric;
S classGeneric;
<M extends Character> void foo() {
M localGeneric = null;
// access type dependant on class/method type arguments
// Primary[Prefix[Name[classGeneric]]]
classGeneric = null; // Double
localGeneric = null; // Character
// test type parameters extending generic types
// Primary[Prefix[Name[parameterGeneric.first]]]
parameterGeneric.second.second = new Integer(0);
}
<C extends Number> FieldAccessGenericParameter() {
C constructorGeneric = null;
// access type dependant on constructor type arugments
// Primary[Prefix[Name[localGeneric]]]
constructorGeneric = null; // Number
}
}
| 1 | 17,814 | The test sources are ok though, you don't need to remove those changes | pmd-pmd | java |
@@ -72,7 +72,7 @@ define(['itemHelper', 'mediaInfo', 'indicators', 'connectionManager', 'layoutMan
var apiClient = connectionManager.getApiClient(item.ServerId);
var options = {
- width: width,
+ maxWidth: width * 2,
type: "Primary"
};
| 1 | define(['itemHelper', 'mediaInfo', 'indicators', 'connectionManager', 'layoutManager', 'globalize', 'datetime', 'apphost', 'css!./listview', 'emby-ratingbutton', 'emby-playstatebutton'], function (itemHelper, mediaInfo, indicators, connectionManager, layoutManager, globalize, datetime, appHost) {
'use strict';
function getIndex(item, options) {
if (options.index === 'disc') {
return item.ParentIndexNumber == null ? '' : globalize.translate('ValueDiscNumber', item.ParentIndexNumber);
}
var sortBy = (options.sortBy || '').toLowerCase();
var code;
var name;
if (sortBy.indexOf('sortname') === 0) {
if (item.Type === 'Episode') {
return '';
}
// SortName
name = (item.SortName || item.Name || '?')[0].toUpperCase();
code = name.charCodeAt(0);
if (code < 65 || code > 90) {
return '#';
}
return name.toUpperCase();
}
if (sortBy.indexOf('officialrating') === 0) {
return item.OfficialRating || globalize.translate('Unrated');
}
if (sortBy.indexOf('communityrating') === 0) {
if (item.CommunityRating == null) {
return globalize.translate('Unrated');
}
return Math.floor(item.CommunityRating);
}
if (sortBy.indexOf('criticrating') === 0) {
if (item.CriticRating == null) {
return globalize.translate('Unrated');
}
return Math.floor(item.CriticRating);
}
if (sortBy.indexOf('albumartist') === 0) {
// SortName
if (!item.AlbumArtist) {
return '';
}
name = item.AlbumArtist[0].toUpperCase();
code = name.charCodeAt(0);
if (code < 65 || code > 90) {
return '#';
}
return name.toUpperCase();
}
return '';
}
function getImageUrl(item, width) {
var apiClient = connectionManager.getApiClient(item.ServerId);
var options = {
width: width,
type: "Primary"
};
if (item.ImageTags && item.ImageTags.Primary) {
options.tag = item.ImageTags.Primary;
return apiClient.getScaledImageUrl(item.Id, options);
}
if (item.AlbumId && item.AlbumPrimaryImageTag) {
options.tag = item.AlbumPrimaryImageTag;
return apiClient.getScaledImageUrl(item.AlbumId, options);
} else if (item.SeriesId && item.SeriesPrimaryImageTag) {
options.tag = item.SeriesPrimaryImageTag;
return apiClient.getScaledImageUrl(item.SeriesId, options);
} else if (item.ParentPrimaryImageTag) {
options.tag = item.ParentPrimaryImageTag;
return apiClient.getScaledImageUrl(item.ParentPrimaryImageItemId, options);
}
return null;
}
function getChannelImageUrl(item, width) {
var apiClient = connectionManager.getApiClient(item.ServerId);
var options = {
width: width,
type: "Primary"
};
if (item.ChannelId && item.ChannelPrimaryImageTag) {
options.tag = item.ChannelPrimaryImageTag;
return apiClient.getScaledImageUrl(item.ChannelId, options);
}
return null;
}
function getTextLinesHtml(textlines, isLargeStyle) {
var html = '';
var largeTitleTagName = layoutManager.tv ? 'h2' : 'div';
for (var i = 0, length = textlines.length; i < length; i++) {
var text = textlines[i];
if (!text) {
continue;
}
if (i === 0) {
if (isLargeStyle) {
html += '<' + largeTitleTagName + ' class="listItemBodyText">';
} else {
html += '<div class="listItemBodyText">';
}
} else {
html += '<div class="secondary listItemBodyText">';
}
html += (textlines[i] || ' ');
if (i === 0 && isLargeStyle) {
html += '</' + largeTitleTagName + '>';
} else {
html += '</div>';
}
}
return html;
}
function getRightButtonsHtml(options) {
var html = '';
for (var i = 0, length = options.rightButtons.length; i < length; i++) {
var button = options.rightButtons[i];
html += '<button is="paper-icon-button-light" class="listItemButton itemAction" data-action="custom" data-customaction="' + button.id + '" title="' + button.title + '"><i class="material-icons">' + button.icon + '</i></button>';
}
return html;
}
function getId(item) {
return item.Id;
}
function getListViewHtml(options) {
var items = options.items;
var groupTitle = '';
var action = options.action || 'link';
var isLargeStyle = options.imageSize === 'large';
var enableOverview = options.enableOverview;
var clickEntireItem = layoutManager.tv ? true : false;
var outerTagName = clickEntireItem ? 'button' : 'div';
var enableSideMediaInfo = options.enableSideMediaInfo != null ? options.enableSideMediaInfo : true;
var outerHtml = '';
var enableContentWrapper = options.enableOverview && !layoutManager.tv;
var containerAlbumArtistIds = (options.containerAlbumArtists || []).map(getId);
for (var i = 0, length = items.length; i < length; i++) {
var item = items[i];
var html = '';
if (options.showIndex) {
var itemGroupTitle = getIndex(item, options);
if (itemGroupTitle !== groupTitle) {
if (html) {
html += '</div>';
}
if (i === 0) {
html += '<h2 class="listGroupHeader listGroupHeader-first">';
} else {
html += '<h2 class="listGroupHeader">';
}
html += itemGroupTitle;
html += '</h2>';
html += '<div>';
groupTitle = itemGroupTitle;
}
}
var cssClass = "listItem";
if (options.border || (options.highlight !== false && !layoutManager.tv)) {
cssClass += ' listItem-border';
}
if (clickEntireItem) {
cssClass += ' itemAction listItem-button';
}
if (layoutManager.tv) {
cssClass += ' listItem-focusscale';
}
var downloadWidth = 80;
if (isLargeStyle) {
cssClass += " listItem-largeImage";
downloadWidth = 500;
}
var playlistItemId = item.PlaylistItemId ? (' data-playlistitemid="' + item.PlaylistItemId + '"') : '';
var positionTicksData = item.UserData && item.UserData.PlaybackPositionTicks ? (' data-positionticks="' + item.UserData.PlaybackPositionTicks + '"') : '';
var collectionIdData = options.collectionId ? (' data-collectionid="' + options.collectionId + '"') : '';
var playlistIdData = options.playlistId ? (' data-playlistid="' + options.playlistId + '"') : '';
var mediaTypeData = item.MediaType ? (' data-mediatype="' + item.MediaType + '"') : '';
var collectionTypeData = item.CollectionType ? (' data-collectiontype="' + item.CollectionType + '"') : '';
var channelIdData = item.ChannelId ? (' data-channelid="' + item.ChannelId + '"') : '';
if (enableContentWrapper) {
cssClass += ' listItem-withContentWrapper';
}
html += '<' + outerTagName + ' class="' + cssClass + '"' + playlistItemId + ' data-action="' + action + '" data-isfolder="' + item.IsFolder + '" data-id="' + item.Id + '" data-serverid="' + item.ServerId + '" data-type="' + item.Type + '"' + mediaTypeData + collectionTypeData + channelIdData + positionTicksData + collectionIdData + playlistIdData + '>';
if (enableContentWrapper) {
html += '<div class="listItem-content">';
}
if (!clickEntireItem && options.dragHandle) {
//html += '<button is="paper-icon-button-light" class="listViewDragHandle listItemButton"><i class="material-icons drag_handle"></i></button>';
// Firefox and Edge are not allowing the button to be draggable
html += '<i class="listViewDragHandle material-icons listItemIcon listItemIcon-transparent drag_handle"></i>';
}
if (options.image !== false) {
var imgUrl = options.imageSource === 'channel' ? getChannelImageUrl(item, downloadWidth) : getImageUrl(item, downloadWidth);
var imageClass = isLargeStyle ? 'listItemImage listItemImage-large' : 'listItemImage';
if (isLargeStyle && layoutManager.tv) {
imageClass += ' listItemImage-large-tv';
}
var playOnImageClick = options.imagePlayButton && !layoutManager.tv;
if (!clickEntireItem) {
imageClass += ' itemAction';
}
var imageAction = playOnImageClick ? 'resume' : action;
if (imgUrl) {
html += '<div data-action="' + imageAction + '" class="' + imageClass + ' lazy" data-src="' + imgUrl + '" item-icon>';
} else {
html += '<div class="' + imageClass + '">';
}
var indicatorsHtml = '';
indicatorsHtml += indicators.getPlayedIndicatorHtml(item);
if (indicatorsHtml) {
html += '<div class="indicators listItemIndicators">' + indicatorsHtml + '</div>';
}
if (playOnImageClick) {
html += '<button is="paper-icon-button-light" class="listItemImageButton itemAction" data-action="resume"><i class="material-icons listItemImageButton-icon play_arrow"></i></button>';
}
var progressHtml = indicators.getProgressBarHtml(item, {
containerClass: 'listItemProgressBar'
});
if (progressHtml) {
html += progressHtml;
}
html += '</div>';
}
if (options.showIndexNumberLeft) {
html += '<div class="listItem-indexnumberleft">';
html += (item.IndexNumber || ' ');
html += '</div>';
}
var textlines = [];
if (options.showProgramDateTime) {
textlines.push(datetime.toLocaleString(datetime.parseISO8601Date(item.StartDate), {
weekday: 'long',
month: 'short',
day: 'numeric',
hour: 'numeric',
minute: '2-digit'
}));
}
if (options.showProgramTime) {
textlines.push(datetime.getDisplayTime(datetime.parseISO8601Date(item.StartDate)));
}
if (options.showChannel) {
if (item.ChannelName) {
textlines.push(item.ChannelName);
}
}
var parentTitle = null;
if (options.showParentTitle) {
if (item.Type === 'Episode') {
parentTitle = item.SeriesName;
} else if (item.IsSeries || (item.EpisodeTitle && item.Name)) {
parentTitle = item.Name;
}
}
var displayName = itemHelper.getDisplayName(item, {
includeParentInfo: options.includeParentInfoInTitle
});
if (options.showIndexNumber && item.IndexNumber != null) {
displayName = item.IndexNumber + ". " + displayName;
}
if (options.showParentTitle && options.parentTitleWithTitle) {
if (displayName) {
if (parentTitle) {
parentTitle += ' - ';
}
parentTitle = (parentTitle || '') + displayName;
}
textlines.push(parentTitle || '');
} else if (options.showParentTitle) {
textlines.push(parentTitle || '');
}
if (displayName && !options.parentTitleWithTitle) {
textlines.push(displayName);
}
if (item.IsFolder) {
if (options.artist !== false) {
if (item.AlbumArtist && item.Type === 'MusicAlbum') {
textlines.push(item.AlbumArtist);
}
}
} else {
var showArtist = options.artist === true;
var artistItems = item.ArtistItems;
if (!showArtist && options.artist !== false) {
if (!artistItems || !artistItems.length) {
showArtist = true;
} else if (artistItems.length > 1 || containerAlbumArtistIds.indexOf(artistItems[0].Id) === -1) {
showArtist = true;
}
}
if (showArtist) {
if (artistItems && item.Type !== 'MusicAlbum') {
textlines.push(artistItems.map(function (a) {
return a.Name;
}).join(', '));
}
}
}
if (item.Type === 'TvChannel') {
if (item.CurrentProgram) {
textlines.push(itemHelper.getDisplayName(item.CurrentProgram));
}
}
cssClass = 'listItemBody';
if (!clickEntireItem) {
cssClass += ' itemAction';
}
if (options.image === false) {
cssClass += ' listItemBody-noleftpadding';
}
html += '<div class="' + cssClass + '">';
var moreIcon = '';
html += getTextLinesHtml(textlines, isLargeStyle);
if (options.mediaInfo !== false) {
if (!enableSideMediaInfo) {
var mediaInfoClass = 'secondary listItemMediaInfo listItemBodyText';
html += '<div class="' + mediaInfoClass + '">' + mediaInfo.getPrimaryMediaInfoHtml(item, {
episodeTitle: false,
originalAirDate: false,
subtitles: false
}) + '</div>';
}
}
if (enableOverview && item.Overview) {
html += '<div class="secondary listItem-overview listItemBodyText">';
html += item.Overview;
html += '</div>';
}
html += '</div>';
if (options.mediaInfo !== false) {
if (enableSideMediaInfo) {
html += '<div class="secondary listItemMediaInfo">' + mediaInfo.getPrimaryMediaInfoHtml(item, {
year: false,
container: false,
episodeTitle: false,
criticRating: false,
endsAt: false
}) + '</div>';
}
}
if (!options.recordButton && (item.Type === 'Timer' || item.Type === 'Program')) {
html += indicators.getTimerIndicator(item).replace('indicatorIcon', 'indicatorIcon listItemAside');
}
html += '<div class="listViewUserDataButtons">';
if (!clickEntireItem) {
if (options.addToListButton) {
html += '<button is="paper-icon-button-light" class="listItemButton itemAction" data-action="addtoplaylist"><i class="material-icons playlist_add"></i></button>';
}
if (options.moreButton !== false) {
html += '<button is="paper-icon-button-light" class="listItemButton itemAction" data-action="menu"><i class="material-icons">' + moreIcon + '</i></button>';
}
if (options.infoButton) {
html += '<button is="paper-icon-button-light" class="listItemButton itemAction" data-action="link"><i class="material-icons info_outline"></i></button>';
}
if (options.rightButtons) {
html += getRightButtonsHtml(options);
}
if (options.enableUserDataButtons !== false) {
var userData = item.UserData || {};
var likes = userData.Likes == null ? '' : userData.Likes;
if (itemHelper.canMarkPlayed(item)) {
html += '<button is="emby-playstatebutton" type="button" class="listItemButton paper-icon-button-light" data-id="' + item.Id + '" data-serverid="' + item.ServerId + '" data-itemtype="' + item.Type + '" data-played="' + (userData.Played) + '"><i class="material-icons">check</i></button>';
}
if (itemHelper.canRate(item)) {
html += '<button is="emby-ratingbutton" type="button" class="listItemButton paper-icon-button-light" data-id="' + item.Id + '" data-serverid="' + item.ServerId + '" data-itemtype="' + item.Type + '" data-likes="' + likes + '" data-isfavorite="' + (userData.IsFavorite) + '"><i class="material-icons">favorite</i></button>';
}
}
}
html += '</div>';
if (enableContentWrapper) {
html += '</div>';
if (enableOverview && item.Overview) {
html += '<div class="listItem-bottomoverview secondary">';
html += item.Overview;
html += '</div>';
}
}
html += '</' + outerTagName + '>';
outerHtml += html;
}
return outerHtml;
}
return {
getListViewHtml: getListViewHtml
};
});
| 1 | 13,594 | `maxWidth: width * 2,` Potentially, this will be the same as `getImageUrl`: 80px or 500px. I can't verify this - need for normal LiveTV tuner. | jellyfin-jellyfin-web | js |
@@ -161,9 +161,11 @@ public class EnginesFilter extends AbstractFilter {
* @return True if the given engine category fulfills the filter, false otherwise
*/
public boolean filter(EngineCategoryDTO engineCategory) {
- return searchTerm.map(
- searchTerm -> engineCategory.getSubCategories().stream().anyMatch(engineSubCategory -> engineSubCategory
- .getPackages().stream().anyMatch(version -> version.getVersion().contains(searchTerm))))
+ return searchTerm
+ .map(searchTerm -> engineCategory.getSubCategories().stream()
+ .anyMatch(engineSubCategory -> engineSubCategory.getPackages().stream()
+ .anyMatch(version -> version.getVersion().toLowerCase()
+ .contains(searchTerm.toLowerCase()))))
.orElse(true);
}
| 1 | package org.phoenicis.javafx.views.mainwindow.engines;
import javafx.beans.property.BooleanProperty;
import javafx.beans.property.SimpleBooleanProperty;
import org.phoenicis.engines.dto.EngineCategoryDTO;
import org.phoenicis.engines.dto.EngineSubCategoryDTO;
import org.phoenicis.engines.dto.EngineVersionDTO;
import org.phoenicis.javafx.views.AbstractFilter;
import java.nio.file.Paths;
import java.util.Optional;
import java.util.function.Predicate;
/**
* This class represents a filter used for filtering the engine versions for
* <ul>
* <li>
* Installed engine versions
* </li>
* <li>
* Not installed engine versions
* </li>
* <li>
* Engine versions containing a search term
* </li>
* </ul>
* This filter depends on a previously defines {@link EngineSubCategoryDTO}.
*
* @author marc
* @since 23.04.17
*/
public class EnginesFilter extends AbstractFilter {
/**
* The path to the installed engines
*/
private String enginesPath;
/**
* The entered search term.
* If no search term has been entered, this value is {@link Optional#empty()}.
*/
private Optional<String> searchTerm;
/**
* The selected engine category.
* If no engine category has been selected, this value is {@link Optional#empty()}.
*/
private Optional<EngineCategoryDTO> selectedEngineCategory;
/**
* Are installed engines searched
*/
private BooleanProperty showInstalled;
/**
* Are not installed engines searched
*/
private BooleanProperty showNotInstalled;
/**
* Constructor
*
* @param enginesPath The path to the installed engines
*/
public EnginesFilter(String enginesPath) {
super();
this.enginesPath = enginesPath;
this.searchTerm = Optional.empty();
this.selectedEngineCategory = Optional.empty();
this.showInstalled = new SimpleBooleanProperty();
this.showInstalled
.addListener((observableValue, oldValue, newValue) -> this.triggerFilterChanged());
this.showNotInstalled = new SimpleBooleanProperty();
this.showNotInstalled
.addListener((observableValue, oldValue, newValue) -> this.triggerFilterChanged());
}
public BooleanProperty showInstalledProperty() {
return this.showInstalled;
}
public BooleanProperty showNotInstalledProperty() {
return this.showNotInstalled;
}
/**
* Sets the search term to the given string.
*
* @param searchTerm The new search term
*/
public void setSearchTerm(String searchTerm) {
this.searchTerm = Optional.of(searchTerm);
this.triggerFilterChanged();
}
/**
* Clears the search term
*/
public void clearSearchTerm() {
this.searchTerm = Optional.empty();
this.triggerFilterChanged();
}
/**
* Sets the selected engine category
*
* @param engineCategory The selected engine category
*/
public void setSelectedEngineCategory(EngineCategoryDTO engineCategory) {
this.selectedEngineCategory = Optional.ofNullable(engineCategory);
this.triggerFilterChanged();
}
/**
* This method checks if a given engine version has been installed
*
* @param engineVersionDTO The engine version to be checked
* @return True if the engine version is installed, false otherwise
*/
private boolean isInstalled(EngineCategoryDTO engineCategory, EngineSubCategoryDTO engineSubCategory,
EngineVersionDTO engineVersionDTO) {
return Paths.get(enginesPath, engineCategory.getName().toLowerCase(), engineSubCategory.getName(),
engineVersionDTO.getVersion()).toFile().exists();
}
/**
* Creates a new filter predicate for a {@link EngineCategoryDTO} and {@link EngineSubCategoryDTO}.
* This predicate then accepts a {@link EngineVersionDTO} object and returns true if the given object fulfills the
* filter predicate and false otherwise
*
* @param engineCategory The engine category
* @param engineSubCategory The engine sub category
* @return A new filter predicate
*/
public Predicate<EngineVersionDTO> createFilter(EngineCategoryDTO engineCategory,
EngineSubCategoryDTO engineSubCategory) {
return engineVersion -> {
final boolean containsSearchTerm = searchTerm
.map(searchTerm -> engineVersion.getVersion().toLowerCase().contains(searchTerm.toLowerCase()))
.orElse(true);
final boolean fulfillsShowInstalled = this.showInstalled.getValue()
&& isInstalled(engineCategory, engineSubCategory, engineVersion);
final boolean fulfillsShowNotInstalled = this.showNotInstalled.getValue()
&& !isInstalled(engineCategory, engineSubCategory, engineVersion);
return containsSearchTerm && (fulfillsShowInstalled || fulfillsShowNotInstalled);
};
}
/**
* Checks if the given engine category fulfills this filter
*
* @param engineCategory The engine category
* @return True if the given engine category fulfills the filter, false otherwise
*/
public boolean filter(EngineCategoryDTO engineCategory) {
return searchTerm.map(
searchTerm -> engineCategory.getSubCategories().stream().anyMatch(engineSubCategory -> engineSubCategory
.getPackages().stream().anyMatch(version -> version.getVersion().contains(searchTerm))))
.orElse(true);
}
/**
* Checks whether a given engine sub category tab is empty or not
*
* @param engineSubCategoryTab The engine sub category tab
* @return True if the given engine sub category tab is not empty, false otherwise
*/
private boolean isNotEmpty(EngineSubCategoryTab engineSubCategoryTab) {
return engineSubCategoryTab.getEngineSubCategory().getPackages().stream()
.anyMatch(engineSubCategoryTab.getFilterPredicate());
}
/**
* Checks if a given engine sub category tab fulfills this filter
*
* @param engineSubCategoryTab The engine sub category tab
* @return True if the given engine sub category tab fulfills the filter, false otherwise
*/
public boolean filter(EngineSubCategoryTab engineSubCategoryTab) {
return isNotEmpty(engineSubCategoryTab) && this.selectedEngineCategory
.map(selectedEngineCategory -> selectedEngineCategory.equals(engineSubCategoryTab.getEngineCategory()))
.orElse(true);
}
}
| 1 | 12,467 | Not really related to this issue but should we use fuzzy search here as well (like for apps)? | PhoenicisOrg-phoenicis | java |
@@ -11,10 +11,10 @@ const mdUsageStr = `Usage:
kbfstool md [<subcommand>] [<args>]
The possible subcommands are:
- dump Dump metadata objects
- check Check metadata objects and their associated blocks for errors
- reset Reset a broken top-level folder
-
+ dump Dump metadata objects
+ check Check metadata objects and their associated blocks for errors
+ reset Reset a broken top-level folder
+ forceQR Append a fake quota reclamation record to the folder history
`
func mdMain(ctx context.Context, config libkbfs.Config, args []string) (exitStatus int) { | 1 | package main
import (
"fmt"
"github.com/keybase/kbfs/libkbfs"
"golang.org/x/net/context"
)
const mdUsageStr = `Usage:
kbfstool md [<subcommand>] [<args>]
The possible subcommands are:
dump Dump metadata objects
check Check metadata objects and their associated blocks for errors
reset Reset a broken top-level folder
`
func mdMain(ctx context.Context, config libkbfs.Config, args []string) (exitStatus int) {
if len(args) < 1 {
fmt.Print(mdUsageStr)
return 1
}
cmd := args[0]
args = args[1:]
switch cmd {
case "dump":
return mdDump(ctx, config, args)
case "check":
return mdCheck(ctx, config, args)
case "reset":
return mdReset(ctx, config, args)
default:
printError("md", fmt.Errorf("unknown command '%s'", cmd))
return 1
}
}
| 1 | 14,600 | isn't the usual capitalization for subcommands force-qr? (don't care too much about it) | keybase-kbfs | go |
@@ -38,7 +38,7 @@ import java.lang.reflect.Proxy;
name = "ProxyNonConstantType",
link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks",
linkType = LinkType.CUSTOM,
- severity = SeverityLevel.SUGGESTION,
+ severity = SeverityLevel.WARNING,
summary = "Proxy instances should be created using constant types known at compile time to allow native-image "
+ "behavior to match hotspot. Methods which build proxies should take a "
+ "`Function<InvocationHandler, ? extends T>` instead of arbitrary class references. " | 1 | /*
* (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.errorprone;
import com.google.auto.service.AutoService;
import com.google.common.reflect.Reflection;
import com.google.errorprone.BugPattern;
import com.google.errorprone.BugPattern.LinkType;
import com.google.errorprone.BugPattern.SeverityLevel;
import com.google.errorprone.VisitorState;
import com.google.errorprone.bugpatterns.BugChecker;
import com.google.errorprone.matchers.Description;
import com.google.errorprone.matchers.Matcher;
import com.google.errorprone.matchers.method.MethodMatchers;
import com.sun.source.tree.ExpressionTree;
import com.sun.source.tree.MemberSelectTree;
import com.sun.source.tree.MethodInvocationTree;
import com.sun.source.tree.NewArrayTree;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
@AutoService(BugChecker.class)
@BugPattern(
name = "ProxyNonConstantType",
link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks",
linkType = LinkType.CUSTOM,
severity = SeverityLevel.SUGGESTION,
summary = "Proxy instances should be created using constant types known at compile time to allow native-image "
+ "behavior to match hotspot. Methods which build proxies should take a "
+ "`Function<InvocationHandler, ? extends T>` instead of arbitrary class references. "
+ "The proxy annotation processor can make this process much easier: "
+ "https://github.com/palantir/proxy-processor\n"
+ "See https://www.graalvm.org/reference-manual/native-image/DynamicProxy/#automatic-detection")
public final class ProxyNonConstantType extends BugChecker implements BugChecker.MethodInvocationTreeMatcher {
private static final Matcher<ExpressionTree> NEW_PROXY_INSTANCE_MATCHER =
MethodMatchers.staticMethod().onClass(Proxy.class.getName()).named("newProxyInstance");
private static final Matcher<ExpressionTree> REFLECTION_NEW_PROXY = MethodMatchers.staticMethod()
.onClass(Reflection.class.getName())
.named("newProxy")
.withParameters(Class.class.getName(), InvocationHandler.class.getName());
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (REFLECTION_NEW_PROXY.matches(tree, state)) {
return describeMatch(tree);
}
if (NEW_PROXY_INSTANCE_MATCHER.matches(tree, state)) {
ExpressionTree interfaces = tree.getArguments().get(1);
if (interfaces instanceof NewArrayTree) {
NewArrayTree newArrayTree = (NewArrayTree) interfaces;
for (ExpressionTree element : newArrayTree.getInitializers()) {
if (!isDirectClassAccess(element)) {
return describeMatch(interfaces);
}
}
}
}
return Description.NO_MATCH;
}
private static boolean isDirectClassAccess(ExpressionTree expressionTree) {
return expressionTree instanceof MemberSelectTree
&& ((MemberSelectTree) expressionTree).getIdentifier().contentEquals("class");
}
}
| 1 | 9,151 | Do you know how many repos have hit this? | palantir-gradle-baseline | java |
@@ -1,3 +1,13 @@
+<% if @filter.present? %>
+ <p><%= _(<<-TEXT
+ The data on the usage dashboard is historical in nature. This means that the number of records below may not
+ match the count shown on the usage dashboard. For example if one of your users created a plan in October and
+ then removed that plan in November, it would have been included on the usage dashboard's total for October but
+ would not appear in the list below.
+ TEXT
+ ) %></p>
+<% end %>
+
<div class="table-responsive">
<table class="table table-hover" id="my-plans">
<thead> | 1 | <div class="table-responsive">
<table class="table table-hover" id="my-plans">
<thead>
<tr>
<th scope="col"><%= _('Project Title') %> <%= paginable_sort_link('plans.title') %></th>
<th scope="col"><%= _('Template') %> <%= paginable_sort_link('templates.title') %></th>
<th scope="col"><%= _('Organisation') %> <%= paginable_sort_link('orgs.name') %></th>
<th scope="col"><%= _('Owner') %></th>
<th scope="col" class="date-column"><%= _('Updated') %> <%= paginable_sort_link('plans.updated_at') %></th>
<th scope="col"><%= _('Visibility') %></th>
</tr>
</thead>
<tbody>
<% scope.each do |plan| %>
<tr>
<td>
<% if plan.readable_by?(current_user.id) %>
<%= link_to "#{plan.title.length > 60 ? "#{plan.title[0..59]} ..." : plan.title}", plan_path(plan) %>
<% else %>
<%= plan.title.truncate(60) %>
<% end %>
</td>
<td><%= plan.template.title %></td>
<td><%= plan.owner.org.name %></td>
<td><%= plan.owner.name(false) %></td>
<td><%= l(plan.updated_at.to_date, formats: :short) %></td>
<td class="plan-visibility">
<%= plan.visibility === 'is_test' ? _('Test') : sanitize(display_visibility(plan.visibility)) %>
</td>
</tr>
<% end %>
</tbody>
</table>
</div>
| 1 | 18,982 | I'll be interested to see if our gettext rake tasks will pick up on this text to translate | DMPRoadmap-roadmap | rb |
@@ -933,6 +933,7 @@ void LuaScriptInterface::pushInstantSpell(lua_State* L, const InstantSpell& spel
setField(L, "mlevel", spell.getMagicLevel());
setField(L, "mana", spell.getMana());
setField(L, "manapercent", spell.getManaPercent());
+ setField(L, "params", spell.getHasParam());
setMetatable(L, -1, "Spell");
} | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include <boost/range/adaptor/reversed.hpp>
#include <fmt/format.h>
#include "luascript.h"
#include "chat.h"
#include "player.h"
#include "game.h"
#include "protocolstatus.h"
#include "spells.h"
#include "iologindata.h"
#include "iomapserialize.h"
#include "configmanager.h"
#include "teleport.h"
#include "databasemanager.h"
#include "bed.h"
#include "monster.h"
#include "scheduler.h"
#include "databasetasks.h"
#include "events.h"
#include "movement.h"
#include "globalevent.h"
#include "script.h"
#include "weapons.h"
extern Chat* g_chat;
extern Game g_game;
extern Monsters g_monsters;
extern ConfigManager g_config;
extern Vocations g_vocations;
extern Spells* g_spells;
extern Events* g_events;
extern Actions* g_actions;
extern TalkActions* g_talkActions;
extern CreatureEvents* g_creatureEvents;
extern MoveEvents* g_moveEvents;
extern GlobalEvents* g_globalEvents;
extern Scripts* g_scripts;
extern Weapons* g_weapons;
// Static storage shared by every ScriptEnvironment instance: cached
// database results, the last handed-out result handle, and the set of
// temporary items keyed by the environment that created them.
ScriptEnvironment::DBResultMap ScriptEnvironment::tempResults;
uint32_t ScriptEnvironment::lastResultId = 0;
std::multimap<ScriptEnvironment*, Item*> ScriptEnvironment::tempItems;

// The process-wide Lua environment that owns the shared lua_State.
LuaEnvironment g_luaEnvironment;
// Starts from a clean state on construction.
ScriptEnvironment::ScriptEnvironment()
{
	resetEnv();
}

// Releases any remaining per-environment state on destruction.
ScriptEnvironment::~ScriptEnvironment()
{
	resetEnv();
}
// Resets this environment to its initial state: clears the running
// script/callback identification, the local uid->item map and the cached
// database results, and releases every temporary item registered by this
// environment that is still parked in the virtual cylinder.
void ScriptEnvironment::resetEnv()
{
	scriptId = 0;
	callbackId = 0;
	timerEvent = false;
	interface = nullptr;
	localMap.clear();
	tempResults.clear();

	auto pair = tempItems.equal_range(this);
	auto it = pair.first;
	while (it != pair.second) {
		Item* item = it->second;
		// Only items still owned by the virtual cylinder are released here;
		// items that were moved into the world are owned elsewhere.
		if (item->getParent() == VirtualCylinder::virtualCylinder) {
			g_game.ReleaseItem(item);
		}
		it = tempItems.erase(it);
	}
}
// Records the callback that is about to run in this environment.
// Nesting is not supported: when a callback id is already set, an error is
// reported through the owning interface (when available) and false returned.
bool ScriptEnvironment::setCallbackId(int32_t callbackId, LuaScriptInterface* scriptInterface)
{
	if (this->callbackId == 0) {
		this->callbackId = callbackId;
		interface = scriptInterface;
		return true;
	}

	//nested callbacks are not allowed
	if (interface) {
		reportErrorFunc(interface->getLuaState(), "Nested callbacks!");
	}
	return false;
}
// Copies the identification of the currently running event (script id,
// owning interface, callback id and timer-event flag) into the out-params.
void ScriptEnvironment::getEventInfo(int32_t& scriptId, LuaScriptInterface*& scriptInterface, int32_t& callbackId, bool& timerEvent) const
{
	scriptId = this->scriptId;
	scriptInterface = interface;
	callbackId = this->callbackId;
	timerEvent = this->timerEvent;
}
// Returns a uid that scripts can use to refer to the given thing.
// Creatures use their creature id, items carrying a unique-id attribute use
// that id, and any other item gets (or reuses) an entry in this
// environment's localMap. Returns 0 for null or removed things.
uint32_t ScriptEnvironment::addThing(Thing* thing)
{
	if (!thing || thing->isRemoved()) {
		return 0;
	}

	Creature* creature = thing->getCreature();
	if (creature) {
		return creature->getID();
	}

	Item* item = thing->getItem();
	if (item && item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
		return item->getUniqueId();
	}

	// Reuse the existing local uid if this item was already registered.
	for (const auto& it : localMap) {
		if (it.second == item) {
			return it.first;
		}
	}

	localMap[++lastUID] = item;
	return lastUID;
}
// Registers an item under an explicit uid in this environment's map.
// If the uid is already taken the existing mapping is kept and an error is
// logged; the message now names the offending uid and terminates its line
// (the previous diagnostic did neither).
void ScriptEnvironment::insertItem(uint32_t uid, Item* item)
{
	auto result = localMap.emplace(uid, item);
	if (!result.second) {
		std::cout << std::endl << "Lua Script Error: Thing uid already taken: " << uid << std::endl;
	}
}
// Resolves a script uid back to a Thing.
// uids >= 0x10000000 are creature ids, uids that fit in 16 bits address
// game-wide unique items, and everything in between is looked up in this
// environment's localMap. Removed items resolve to nullptr.
Thing* ScriptEnvironment::getThingByUID(uint32_t uid)
{
	if (uid >= 0x10000000) {
		return g_game.getCreatureByID(uid);
	}

	if (uid <= std::numeric_limits<uint16_t>::max()) {
		Item* item = g_game.getUniqueItem(uid);
		if (item && !item->isRemoved()) {
			return item;
		}
		return nullptr;
	}

	auto it = localMap.find(uid);
	if (it != localMap.end()) {
		Item* item = it->second;
		if (!item->isRemoved()) {
			return item;
		}
	}
	return nullptr;
}
// Resolves a script uid to an Item; nullptr when the uid does not refer to
// an item.
Item* ScriptEnvironment::getItemByUID(uint32_t uid)
{
	Thing* thing = getThingByUID(uid);
	return thing ? thing->getItem() : nullptr;
}

// Resolves a script uid to a Container; nullptr when the uid does not
// refer to a container item.
Container* ScriptEnvironment::getContainerByUID(uint32_t uid)
{
	Item* item = getItemByUID(uid);
	return item ? item->getContainer() : nullptr;
}
// Forgets the mapping for the given script uid. Game-wide unique-item uids
// (<= 16 bits) are removed from the global registry; anything else is
// erased from this environment's localMap, silently ignoring a miss.
void ScriptEnvironment::removeItemByUID(uint32_t uid)
{
	if (uid <= std::numeric_limits<uint16_t>::max()) {
		g_game.removeUniqueItem(uid);
	} else {
		localMap.erase(uid);
	}
}
// Marks an item as temporary for this environment; temporaries that remain
// in the virtual cylinder are released when the environment resets.
void ScriptEnvironment::addTempItem(Item* item)
{
	tempItems.emplace(this, item);
}

// Drops the first temporary-item entry referring to the given item,
// regardless of which environment registered it.
void ScriptEnvironment::removeTempItem(Item* item)
{
	for (auto it = tempItems.begin(); it != tempItems.end(); ++it) {
		if (it->second == item) {
			tempItems.erase(it);
			return;
		}
	}
}
// Stores a database result and returns the handle scripts use to access it.
uint32_t ScriptEnvironment::addResult(DBResult_ptr res)
{
	tempResults[++lastResultId] = res;
	return lastResultId;
}

// Releases the database result with the given handle; false when unknown.
bool ScriptEnvironment::removeResult(uint32_t id)
{
	return tempResults.erase(id) != 0;
}

// Looks up a stored database result by handle; nullptr when unknown.
DBResult_ptr ScriptEnvironment::getResultByID(uint32_t id)
{
	auto it = tempResults.find(id);
	return it != tempResults.end() ? it->second : nullptr;
}
// Translates an internal script error code into a human-readable message.
// Unrecognized codes yield "Bad error code".
std::string LuaScriptInterface::getErrorDesc(ErrorCode_t code)
{
	switch (code) {
		case LUA_ERROR_PLAYER_NOT_FOUND:
			return "Player not found";
		case LUA_ERROR_CREATURE_NOT_FOUND:
			return "Creature not found";
		case LUA_ERROR_ITEM_NOT_FOUND:
			return "Item not found";
		case LUA_ERROR_THING_NOT_FOUND:
			return "Thing not found";
		case LUA_ERROR_TILE_NOT_FOUND:
			return "Tile not found";
		case LUA_ERROR_HOUSE_NOT_FOUND:
			return "House not found";
		case LUA_ERROR_COMBAT_NOT_FOUND:
			return "Combat not found";
		case LUA_ERROR_CONDITION_NOT_FOUND:
			return "Condition not found";
		case LUA_ERROR_AREA_NOT_FOUND:
			return "Area not found";
		case LUA_ERROR_CONTAINER_NOT_FOUND:
			return "Container not found";
		case LUA_ERROR_VARIANT_NOT_FOUND:
			return "Variant not found";
		case LUA_ERROR_VARIANT_UNKNOWN:
			return "Unknown variant type";
		case LUA_ERROR_SPELL_NOT_FOUND:
			return "Spell not found";
		default:
			return "Bad error code";
	}
}
// One environment per nesting level of script execution (max depth 16).
ScriptEnvironment LuaScriptInterface::scriptEnv[16];
// Index of the currently active environment; -1 means none is reserved.
int32_t LuaScriptInterface::scriptEnvIndex = -1;
// Creates a named script interface, lazily initializing the shared global
// Lua state on first use.
LuaScriptInterface::LuaScriptInterface(std::string interfaceName) : interfaceName(std::move(interfaceName))
{
	if (!g_luaEnvironment.getLuaState()) {
		g_luaEnvironment.initState();
	}
}

// Detaches from the shared Lua state, releasing this interface's event table.
LuaScriptInterface::~LuaScriptInterface()
{
	closeState();
}
// Drops the combat and area objects registered by this interface, then
// tears down and rebuilds its event table from scratch.
bool LuaScriptInterface::reInitState()
{
	g_luaEnvironment.clearCombatObjects(this);
	g_luaEnvironment.clearAreaObjects(this);

	closeState();
	return initState();
}
/// Same as lua_pcall, but adds stack trace to error strings in called function.
int LuaScriptInterface::protectedCall(lua_State* L, int nargs, int nresults)
{
	// Insert the error handler below the function and its arguments so
	// lua_pcall can reference it by absolute index.
	int error_index = lua_gettop(L) - nargs;
	lua_pushcfunction(L, luaErrorHandler);
	lua_insert(L, error_index);

	int ret = lua_pcall(L, nargs, nresults, error_index);
	// Remove the handler again so the stack looks as lua_pcall would leave it.
	lua_remove(L, error_index);
	return ret;
}
// Loads and executes a Lua file inside a fresh script environment; the
// optional npc is exposed to the chunk through that environment.
// Returns 0 on success, -1 on any load/compile/runtime failure (the load
// error text is stored in lastLuaError).
int32_t LuaScriptInterface::loadFile(const std::string& file, Npc* npc /* = nullptr*/)
{
	//loads file as a chunk at stack top
	int ret = luaL_loadfile(luaState, file.c_str());
	if (ret != 0) {
		lastLuaError = popString(luaState);
		return -1;
	}

	//check that it is loaded as a function
	if (!isFunction(luaState, -1)) {
		lua_pop(luaState, 1);
		return -1;
	}

	// Remember the file so events registered during execution can be
	// attributed to it (see getEvent/getMetaEvent).
	loadingFile = file;

	if (!reserveScriptEnv()) {
		lua_pop(luaState, 1);
		return -1;
	}

	ScriptEnvironment* env = getScriptEnv();
	env->setScriptId(EVENT_ID_LOADING, this);
	env->setNpc(npc);

	//execute it
	ret = protectedCall(luaState, 0, 0);
	if (ret != 0) {
		reportError(nullptr, popString(luaState));
		resetScriptEnv();
		return -1;
	}

	resetScriptEnv();
	return 0;
}
// Registers the Lua function currently stored in the global with the given
// name as an event: it is moved into this interface's private event table
// and the global is cleared afterwards. Returns the new event id, or -1
// when the global is missing or not a function.
int32_t LuaScriptInterface::getEvent(const std::string& eventName)
{
	//get our events table
	lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
	if (!isTable(luaState, -1)) {
		lua_pop(luaState, 1);
		return -1;
	}

	//get current event function pointer
	lua_getglobal(luaState, eventName.c_str());
	if (!isFunction(luaState, -1)) {
		lua_pop(luaState, 2);
		return -1;
	}

	//save in our events table
	lua_pushvalue(luaState, -1);
	lua_rawseti(luaState, -3, runningEventId);
	lua_pop(luaState, 2);

	//reset global value of this event
	lua_pushnil(luaState);
	lua_setglobal(luaState, eventName.c_str());

	cacheFiles[runningEventId] = loadingFile + ":" + eventName;
	return runningEventId++;
}
// Registers the function currently on top of the Lua stack (an inline
// callback) in this interface's event table. Returns the new event id, or
// -1 when the top of the stack is not a function.
int32_t LuaScriptInterface::getEvent()
{
	//check if function is on the stack
	if (!isFunction(luaState, -1)) {
		return -1;
	}

	//get our events table
	lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
	if (!isTable(luaState, -1)) {
		lua_pop(luaState, 1);
		return -1;
	}

	//save in our events table
	lua_pushvalue(luaState, -2);
	lua_rawseti(luaState, -2, runningEventId);
	lua_pop(luaState, 2);

	cacheFiles[runningEventId] = loadingFile + ":callback";
	return runningEventId++;
}
// Registers the event function stored at <globalName>.<eventName> (a field
// of a global table) in this interface's event table, then clears the
// field. Returns the new event id, or -1 when the field is missing or not
// a function.
int32_t LuaScriptInterface::getMetaEvent(const std::string& globalName, const std::string& eventName)
{
	//get our events table
	lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
	if (!isTable(luaState, -1)) {
		lua_pop(luaState, 1);
		return -1;
	}

	//get current event function pointer
	lua_getglobal(luaState, globalName.c_str());
	lua_getfield(luaState, -1, eventName.c_str());
	if (!isFunction(luaState, -1)) {
		lua_pop(luaState, 3);
		return -1;
	}

	//save in our events table
	lua_pushvalue(luaState, -1);
	lua_rawseti(luaState, -4, runningEventId);
	lua_pop(luaState, 1);

	//reset global value of this event
	lua_pushnil(luaState);
	lua_setfield(luaState, -2, eventName.c_str());
	lua_pop(luaState, 2);

	cacheFiles[runningEventId] = loadingFile + ":" + globalName + "@" + eventName;
	return runningEventId++;
}
// Returns the source file (and event name) that registered the given
// script id; EVENT_ID_LOADING maps to the file currently being loaded.
const std::string& LuaScriptInterface::getFileById(int32_t scriptId)
{
	if (scriptId == EVENT_ID_LOADING) {
		return loadingFile;
	}

	auto it = cacheFiles.find(scriptId);
	if (it == cacheFiles.end()) {
		// A static object, not a static reference bound to a temporary
		// string: the old `static const std::string&` relied on reference
		// lifetime extension, an anti-pattern flagged by linters.
		static const std::string unk = "(Unknown scriptfile)";
		return unk;
	}
	return it->second;
}
// Runs error_desc through Lua's debug.traceback to append a stack trace.
// Falls back to returning error_desc unchanged when the debug library (or
// its traceback function) is unavailable.
std::string LuaScriptInterface::getStackTrace(lua_State* L, const std::string& error_desc)
{
	lua_getglobal(L, "debug");
	if (!isTable(L, -1)) {
		lua_pop(L, 1);
		return error_desc;
	}

	lua_getfield(L, -1, "traceback");
	if (!isFunction(L, -1)) {
		lua_pop(L, 2);
		return error_desc;
	}

	// Replace the debug table with the traceback function, then call it
	// with the error description as its single argument.
	lua_replace(L, -2);
	pushString(L, error_desc);
	lua_call(L, 1, 1);
	return popString(L);
}
// Prints a formatted script error to stdout: the owning interface, whether
// the error originated in a timer event or callback, the source file of
// the running script, the reporting C++ function (if given), and the error
// text — optionally with a Lua stack trace when L and stack_trace are set.
void LuaScriptInterface::reportError(const char* function, const std::string& error_desc, lua_State* L /*= nullptr*/, bool stack_trace /*= false*/)
{
	int32_t scriptId;
	int32_t callbackId;
	bool timerEvent;
	LuaScriptInterface* scriptInterface;
	getScriptEnv()->getEventInfo(scriptId, scriptInterface, callbackId, timerEvent);

	std::cout << std::endl << "Lua Script Error: ";

	if (scriptInterface) {
		std::cout << '[' << scriptInterface->getInterfaceName() << "] " << std::endl;

		if (timerEvent) {
			std::cout << "in a timer event called from: " << std::endl;
		}

		if (callbackId) {
			std::cout << "in callback: " << scriptInterface->getFileById(callbackId) << std::endl;
		}

		std::cout << scriptInterface->getFileById(scriptId) << std::endl;
	}

	if (function) {
		std::cout << function << "(). ";
	}

	if (L && stack_trace) {
		std::cout << getStackTrace(L, error_desc) << std::endl;
	} else {
		std::cout << error_desc << std::endl;
	}
}
bool LuaScriptInterface::pushFunction(int32_t functionId)
{
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
return false;
}
lua_rawgeti(luaState, -1, functionId);
lua_replace(luaState, -2);
return isFunction(luaState, -1);
}
// Binds this interface to the shared Lua state and creates a fresh private
// event table in the registry. Event ids start at EVENT_ID_USER.
bool LuaScriptInterface::initState()
{
	luaState = g_luaEnvironment.getLuaState();
	if (!luaState) {
		return false;
	}

	lua_newtable(luaState);
	eventTableRef = luaL_ref(luaState, LUA_REGISTRYINDEX);
	runningEventId = EVENT_ID_USER;
	return true;
}
// Detaches this interface from the shared Lua state: clears the cached
// file names and releases the event table from the registry. Returns false
// when there is no state to close.
bool LuaScriptInterface::closeState()
{
	if (!g_luaEnvironment.getLuaState() || !luaState) {
		return false;
	}

	cacheFiles.clear();
	if (eventTableRef != -1) {
		luaL_unref(luaState, LUA_REGISTRYINDEX, eventTableRef);
		eventTableRef = -1;
	}

	luaState = nullptr;
	return true;
}
// lua_pcall message handler (installed by protectedCall): replaces the
// error message on top of the stack with one that includes a stack trace.
int LuaScriptInterface::luaErrorHandler(lua_State* L)
{
	const std::string& errorMessage = popString(L);
	pushString(L, LuaScriptInterface::getStackTrace(L, errorMessage));
	return 1;
}
// Calls the function on top of the stack with `params` arguments and
// interprets the single result as a boolean (false on runtime error).
// Also sanity-checks stack balance and resets the script environment.
bool LuaScriptInterface::callFunction(int params)
{
	bool result = false;
	int size = lua_gettop(luaState);
	if (protectedCall(luaState, params, 1) != 0) {
		LuaScriptInterface::reportError(nullptr, LuaScriptInterface::getString(luaState, -1));
	} else {
		result = LuaScriptInterface::getBoolean(luaState, -1);
	}

	lua_pop(luaState, 1);
	// `size` was sampled after the function + params were pushed; once the
	// result is popped the top should be back at size - params - 1.
	if ((lua_gettop(luaState) + params + 1) != size) {
		LuaScriptInterface::reportError(nullptr, "Stack size changed!");
	}

	resetScriptEnv();
	return result;
}
// Like callFunction, but discards all results; runtime errors are reported.
// Also sanity-checks stack balance and resets the script environment.
void LuaScriptInterface::callVoidFunction(int params)
{
	int size = lua_gettop(luaState);
	if (protectedCall(luaState, params, 0) != 0) {
		LuaScriptInterface::reportError(nullptr, LuaScriptInterface::popString(luaState));
	}

	// The function and its params should have been consumed by the call.
	if ((lua_gettop(luaState) + params + 1) != size) {
		LuaScriptInterface::reportError(nullptr, "Stack size changed!");
	}

	resetScriptEnv();
}
// Pushes a LuaVariant as a table {type = ..., number|string|pos = ...}
// carrying the "Variant" metatable.
void LuaScriptInterface::pushVariant(lua_State* L, const LuaVariant& var)
{
	lua_createtable(L, 0, 2);
	setField(L, "type", var.type);
	switch (var.type) {
		case VARIANT_NUMBER:
			setField(L, "number", var.number);
			break;
		case VARIANT_STRING:
			setField(L, "string", var.text);
			break;
		case VARIANT_TARGETPOSITION:
		case VARIANT_POSITION: {
			pushPosition(L, var.pos);
			lua_setfield(L, -2, "pos");
			break;
		}
		default:
			break;
	}
	setMetatable(L, -1, "Variant");
}
// Pushes a Thing as typed userdata. A null thing is pushed as a zeroed
// legacy table ({uid, itemid, actionid, type} all 0); a thing that is
// neither an item nor a creature pushes nil.
void LuaScriptInterface::pushThing(lua_State* L, Thing* thing)
{
	if (!thing) {
		lua_createtable(L, 0, 4);
		setField(L, "uid", 0);
		setField(L, "itemid", 0);
		setField(L, "actionid", 0);
		setField(L, "type", 0);
		return;
	}

	if (Item* item = thing->getItem()) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
	} else if (Creature* creature = thing->getCreature()) {
		pushUserdata<Creature>(L, creature);
		setCreatureMetatable(L, -1, creature);
	} else {
		lua_pushnil(L);
	}
}
// Pushes a cylinder as the most specific userdata available: creature,
// item, or tile (checked in that order). The virtual cylinder is
// represented as boolean true; anything else pushes nil.
void LuaScriptInterface::pushCylinder(lua_State* L, Cylinder* cylinder)
{
	if (Creature* creature = cylinder->getCreature()) {
		pushUserdata<Creature>(L, creature);
		setCreatureMetatable(L, -1, creature);
	} else if (Item* parentItem = cylinder->getItem()) {
		pushUserdata<Item>(L, parentItem);
		setItemMetatable(L, -1, parentItem);
	} else if (Tile* tile = cylinder->getTile()) {
		pushUserdata<Tile>(L, tile);
		setMetatable(L, -1, "Tile");
	} else if (cylinder == VirtualCylinder::virtualCylinder) {
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
}
// Pushes a std::string onto the Lua stack, preserving embedded NULs.
void LuaScriptInterface::pushString(lua_State* L, const std::string& value)
{
	lua_pushlstring(L, value.data(), value.length());
}

// Pushes the function stored under the given registry reference.
void LuaScriptInterface::pushCallback(lua_State* L, int32_t callback)
{
	lua_rawgeti(L, LUA_REGISTRYINDEX, callback);
}

// Pops the top of the stack and returns it as a string; an empty stack
// yields an empty string.
std::string LuaScriptInterface::popString(lua_State* L)
{
	if (lua_gettop(L) == 0) {
		return std::string();
	}

	std::string str = getString(L, -1);
	lua_pop(L, 1);
	return str;
}

// Moves the value on top of the stack into the registry and returns the
// reference created for it.
int32_t LuaScriptInterface::popCallback(lua_State* L)
{
	return luaL_ref(L, LUA_REGISTRYINDEX);
}
// Metatables
// Assigns the named metatable to the value at the given (negative) stack
// index; `index - 1` compensates for the metatable just pushed on top.
void LuaScriptInterface::setMetatable(lua_State* L, int32_t index, const std::string& name)
{
	luaL_getmetatable(L, name.c_str());
	lua_setmetatable(L, index - 1);
}
// Like setMetatable, but assigns a "weak" variant of the named metatable:
// the same metamethods minus __gc, so Lua never destroys the underlying
// object through this userdata. The weak metatable is built lazily on
// first use per type and cached in the registry as "<name>_weak".
void LuaScriptInterface::setWeakMetatable(lua_State* L, int32_t index, const std::string& name)
{
	static std::set<std::string> weakObjectTypes;
	const std::string& weakName = name + "_weak";

	auto result = weakObjectTypes.emplace(name);
	if (result.second) {
		luaL_getmetatable(L, name.c_str());
		int childMetatable = lua_gettop(L);

		luaL_newmetatable(L, weakName.c_str());
		int metatable = lua_gettop(L);

		// Copy the string-keyed metamethods from the strong metatable.
		static const std::vector<std::string> methodKeys = {"__index", "__metatable", "__eq"};
		for (const std::string& metaKey : methodKeys) {
			lua_getfield(L, childMetatable, metaKey.c_str());
			lua_setfield(L, metatable, metaKey.c_str());
		}

		// Copy the integer-keyed slots; 't' is the userdata type tag read by
		// getThing ('h' and 'p' are copied as-is — their semantics are
		// defined where the strong metatables are created).
		static const std::vector<int> methodIndexes = {'h', 'p', 't'};
		for (int metaIndex : methodIndexes) {
			lua_rawgeti(L, childMetatable, metaIndex);
			lua_rawseti(L, metatable, metaIndex);
		}

		// Explicitly clear __gc so the object cannot be collected via this
		// handle.
		lua_pushnil(L);
		lua_setfield(L, metatable, "__gc");

		lua_remove(L, childMetatable);
	} else {
		luaL_getmetatable(L, weakName.c_str());
	}
	lua_setmetatable(L, index - 1);
}
// Assigns the most specific item metatable (Container, Teleport, or Item)
// to the value at the given (negative) stack index.
void LuaScriptInterface::setItemMetatable(lua_State* L, int32_t index, const Item* item)
{
	const char* metatableName = "Item";
	if (item->getContainer()) {
		metatableName = "Container";
	} else if (item->getTeleport()) {
		metatableName = "Teleport";
	}
	luaL_getmetatable(L, metatableName);
	lua_setmetatable(L, index - 1);
}

// Assigns the most specific creature metatable (Player, Monster, or Npc)
// to the value at the given (negative) stack index.
void LuaScriptInterface::setCreatureMetatable(lua_State* L, int32_t index, const Creature* creature)
{
	const char* metatableName = "Npc";
	if (creature->getPlayer()) {
		metatableName = "Player";
	} else if (creature->getMonster()) {
		metatableName = "Monster";
	}
	luaL_getmetatable(L, metatableName);
	lua_setmetatable(L, index - 1);
}
// Get
// Converts the value at the given stack index to a std::string (preserving
// embedded NULs); non-strings and empty strings yield an empty result.
std::string LuaScriptInterface::getString(lua_State* L, int32_t arg)
{
	size_t len;
	const char* data = lua_tolstring(L, arg, &len);
	if (data && len != 0) {
		return std::string(data, len);
	}
	return std::string();
}
// Reads a position table {x, y, z[, stackpos]} at the given stack index.
// A missing stackpos field yields 0 through the out-parameter.
Position LuaScriptInterface::getPosition(lua_State* L, int32_t arg, int32_t& stackpos)
{
	Position position;
	position.x = getField<uint16_t>(L, arg, "x");
	position.y = getField<uint16_t>(L, arg, "y");
	position.z = getField<uint8_t>(L, arg, "z");

	lua_getfield(L, arg, "stackpos");
	if (lua_isnil(L, -1) == 1) {
		stackpos = 0;
	} else {
		stackpos = getNumber<int32_t>(L, -1);
	}

	// Pop the three getField values plus the stackpos field.
	lua_pop(L, 4);
	return position;
}

// Reads a position table {x, y, z} at the given stack index.
Position LuaScriptInterface::getPosition(lua_State* L, int32_t arg)
{
	Position position;
	position.x = getField<uint16_t>(L, arg, "x");
	position.y = getField<uint16_t>(L, arg, "y");
	position.z = getField<uint8_t>(L, arg, "z");

	// Pop the three getField values.
	lua_pop(L, 3);
	return position;
}
// Reads an Outfit_t (look type, head/body/legs/feet colors, addons, mount,
// lookTypeEx) from the table at the given stack index.
Outfit_t LuaScriptInterface::getOutfit(lua_State* L, int32_t arg)
{
	Outfit_t outfit;
	outfit.lookMount = getField<uint16_t>(L, arg, "lookMount");
	outfit.lookAddons = getField<uint8_t>(L, arg, "lookAddons");

	outfit.lookFeet = getField<uint8_t>(L, arg, "lookFeet");
	outfit.lookLegs = getField<uint8_t>(L, arg, "lookLegs");
	outfit.lookBody = getField<uint8_t>(L, arg, "lookBody");
	outfit.lookHead = getField<uint8_t>(L, arg, "lookHead");

	outfit.lookTypeEx = getField<uint16_t>(L, arg, "lookTypeEx");
	outfit.lookType = getField<uint16_t>(L, arg, "lookType");

	// Pop the eight getField values.
	lua_pop(L, 8);
	return outfit;
}

// Reads an Outfit definition (name, lookType, premium/unlocked flags) from
// the table at the given stack index.
Outfit LuaScriptInterface::getOutfitClass(lua_State* L, int32_t arg)
{
	uint16_t lookType = getField<uint16_t>(L, arg, "lookType");
	const std::string& name = getFieldString(L, arg, "name");
	bool premium = getField<uint8_t>(L, arg, "premium") == 1;
	bool unlocked = getField<uint8_t>(L, arg, "unlocked") == 1;

	// Pop the four field values.
	lua_pop(L, 4);
	return Outfit(name, lookType, premium, unlocked);
}
// Reads a LuaVariant from the table at stack slot `arg`. The "type" field
// selects which payload field to read next; any unrecognized type collapses
// to VARIANT_NONE. Each branch pops exactly the values it pushed so the
// stack is balanced on every path.
LuaVariant LuaScriptInterface::getVariant(lua_State* L, int32_t arg)
{
	LuaVariant var;
	switch (var.type = getField<LuaVariantType_t>(L, arg, "type")) {
		case VARIANT_NUMBER: {
			var.number = getField<uint32_t>(L, arg, "number");
			lua_pop(L, 2); // "type" + "number"
			break;
		}

		case VARIANT_STRING: {
			var.text = getFieldString(L, arg, "string");
			lua_pop(L, 2); // "type" + "string"
			break;
		}

		case VARIANT_POSITION:
		case VARIANT_TARGETPOSITION: {
			// Push the nested "pos" table and decode it in place.
			lua_getfield(L, arg, "pos");
			var.pos = getPosition(L, lua_gettop(L));
			lua_pop(L, 2); // "type" + "pos" (getPosition balances its own pushes)
			break;
		}

		default: {
			var.type = VARIANT_NONE;
			lua_pop(L, 1); // only "type" was pushed
			break;
		}
	}
	return var;
}
// Resolves an InstantSpell from the "name" field of the table at stack
// slot `arg`. Returns nullptr when no spell with that name is registered.
InstantSpell* LuaScriptInterface::getInstantSpell(lua_State* L, int32_t arg)
{
	// getFieldString copies the value, so the pushed Lua string can be
	// popped before the registry lookup.
	const std::string spellName = getFieldString(L, arg, "name");
	lua_pop(L, 1);
	return g_spells->getInstantSpellByName(spellName);
}
// Resolves the value at stack slot `arg` to a Thing*. Two encodings are
// supported: a class userdata (identified by the 't' tag stored in its
// metatable) or a plain numeric UID resolved through the script environment.
// Returns nullptr for userdata with an unrecognized tag.
Thing* LuaScriptInterface::getThing(lua_State* L, int32_t arg)
{
	Thing* thing;
	if (lua_getmetatable(L, arg) != 0) {
		// Userdata path: metatable[ 't' ] holds the LuaDataType tag
		// (the character 't' is used as an integer key by convention here).
		lua_rawgeti(L, -1, 't');
		switch(getNumber<uint32_t>(L, -1)) {
			case LuaData_Item:
				thing = getUserdata<Item>(L, arg);
				break;
			case LuaData_Container:
				thing = getUserdata<Container>(L, arg);
				break;
			case LuaData_Teleport:
				thing = getUserdata<Teleport>(L, arg);
				break;
			case LuaData_Player:
				thing = getUserdata<Player>(L, arg);
				break;
			case LuaData_Monster:
				thing = getUserdata<Monster>(L, arg);
				break;
			case LuaData_Npc:
				thing = getUserdata<Npc>(L, arg);
				break;
			default:
				thing = nullptr;
				break;
		}
		// Pop the metatable and the tag value pushed above.
		lua_pop(L, 2);
	} else {
		// Plain number path: treat the value as a UID known to the
		// current script environment.
		thing = getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, arg));
	}
	return thing;
}
// Resolves the value at stack slot `arg` to a Creature*: either a Creature
// userdata or a numeric creature id looked up in the game world.
Creature* LuaScriptInterface::getCreature(lua_State* L, int32_t arg)
{
	if (!isUserdata(L, arg)) {
		return g_game.getCreatureByID(getNumber<uint32_t>(L, arg));
	}
	return getUserdata<Creature>(L, arg);
}
// Resolves the value at stack slot `arg` to a Player*: either a Player
// userdata or a numeric player id looked up in the game world.
Player* LuaScriptInterface::getPlayer(lua_State* L, int32_t arg)
{
	if (!isUserdata(L, arg)) {
		return g_game.getPlayerByID(getNumber<uint32_t>(L, arg));
	}
	return getUserdata<Player>(L, arg);
}
// Fetches table[key] from stack slot `arg` and returns it as a string.
// NOTE: the fetched value is left on the Lua stack — callers are
// responsible for popping it (see the lua_pop counts in the get* helpers).
std::string LuaScriptInterface::getFieldString(lua_State* L, int32_t arg, const std::string& key)
{
	lua_getfield(L, arg, key.c_str());
	return getString(L, -1);
}
// Returns the LuaDataType tag stored under the integer key 't' in the
// metatable of the value at stack slot `arg`, or LuaData_Unknown when the
// value has no metatable. Leaves the stack balanced.
LuaDataType LuaScriptInterface::getUserdataType(lua_State* L, int32_t arg)
{
	if (lua_getmetatable(L, arg) == 0) {
		return LuaData_Unknown;
	}
	lua_rawgeti(L, -1, 't');

	LuaDataType type = getNumber<LuaDataType>(L, -1);
	// Pop the metatable and the tag value.
	lua_pop(L, 2);

	return type;
}
// Push
// Pushes a C++ bool onto the Lua stack as a Lua boolean.
// lua_pushboolean takes an int flag; bool converts to exactly 0 or 1.
void LuaScriptInterface::pushBoolean(lua_State* L, bool value)
{
	lua_pushboolean(L, static_cast<int>(value));
}
// Pushes the five components of a CombatDamage onto the Lua stack, in
// order: primary value, primary type, secondary value, secondary type,
// origin. Callers receive them as five separate return values.
void LuaScriptInterface::pushCombatDamage(lua_State* L, const CombatDamage& damage)
{
	lua_pushnumber(L, damage.primary.value);
	lua_pushnumber(L, damage.primary.type);
	lua_pushnumber(L, damage.secondary.value);
	lua_pushnumber(L, damage.secondary.type);
	lua_pushnumber(L, damage.origin);
}
// Pushes a new table describing `spell` (name, words, level, magic level,
// mana, mana percent) and tags it with the "Spell" metatable.
void LuaScriptInterface::pushInstantSpell(lua_State* L, const InstantSpell& spell)
{
	// Pre-size the table for its six named fields.
	lua_createtable(L, 0, 6);

	setField(L, "name", spell.getName());
	setField(L, "words", spell.getWords());
	setField(L, "level", spell.getLevel());
	setField(L, "mlevel", spell.getMagicLevel());
	setField(L, "mana", spell.getMana());
	setField(L, "manapercent", spell.getManaPercent());

	setMetatable(L, -1, "Spell");
}
// Pushes a new table for `position` (x, y, z plus the optional stackpos,
// defaulting to 0) and tags it with the "Position" metatable.
void LuaScriptInterface::pushPosition(lua_State* L, const Position& position, int32_t stackpos/* = 0*/)
{
	// Pre-size the table for its four named fields.
	lua_createtable(L, 0, 4);

	setField(L, "x", position.x);
	setField(L, "y", position.y);
	setField(L, "z", position.z);
	setField(L, "stackpos", stackpos);

	setMetatable(L, -1, "Position");
}
// Pushes a new table describing an Outfit_t look (type, typeEx, head, body,
// legs, feet, addons, mount).
// NOTE(review): unlike the Outfit* overload below, no metatable is set here —
// presumably intentional (plain look-data table), but worth confirming.
void LuaScriptInterface::pushOutfit(lua_State* L, const Outfit_t& outfit)
{
	// Pre-size the table for its eight named fields.
	lua_createtable(L, 0, 8);

	setField(L, "lookType", outfit.lookType);
	setField(L, "lookTypeEx", outfit.lookTypeEx);
	setField(L, "lookHead", outfit.lookHead);
	setField(L, "lookBody", outfit.lookBody);
	setField(L, "lookLegs", outfit.lookLegs);
	setField(L, "lookFeet", outfit.lookFeet);
	setField(L, "lookAddons", outfit.lookAddons);
	setField(L, "lookMount", outfit.lookMount);
}
// Pushes a new table describing a registered Outfit (lookType, name,
// premium, unlocked) and tags it with the "Outfit" metatable.
void LuaScriptInterface::pushOutfit(lua_State* L, const Outfit* outfit)
{
	// Pre-size the table for its four named fields.
	lua_createtable(L, 0, 4);

	setField(L, "lookType", outfit->lookType);
	setField(L, "name", outfit->name);
	setField(L, "premium", outfit->premium);
	setField(L, "unlocked", outfit->unlocked);

	setMetatable(L, -1, "Outfit");
}
// Pushes an array-style table of loot entries. Each entry is a table with
// six scalar fields plus a nested "childLoot" array built by recursing on
// lootBlock.childLoot. Entries are inserted at 1-based indexes.
void LuaScriptInterface::pushLoot(lua_State* L, const std::vector<LootBlock>& lootList)
{
	// Outer array: pre-size the sequence part for all entries.
	lua_createtable(L, lootList.size(), 0);

	int index = 0;
	for (const auto& lootBlock : lootList) {
		// Entry table: six scalar fields + "childLoot".
		lua_createtable(L, 0, 7);

		setField(L, "itemId", lootBlock.id);
		setField(L, "chance", lootBlock.chance);
		setField(L, "subType", lootBlock.subType);
		setField(L, "maxCount", lootBlock.countmax);
		setField(L, "actionId", lootBlock.actionId);
		setField(L, "text", lootBlock.text);

		// Recurse for nested loot, then attach it to the entry.
		pushLoot(L, lootBlock.childLoot);
		lua_setfield(L, -2, "childLoot");

		// Append the entry to the outer array at the next index.
		lua_rawseti(L, -2, ++index);
	}
}
// registerEnum(value): registers `value` as a global Lua variable named after
// the C++ identifier, stripping any leading namespace/scope qualifier
// (everything up to the last ':').
#define registerEnum(value) { std::string enumName = #value; registerGlobalVariable(enumName.substr(enumName.find_last_of(':') + 1), value); }
// registerEnumIn(tableName, value): same as registerEnum, but registers the
// value inside the named Lua table instead of the global environment.
#define registerEnumIn(tableName, value) { std::string enumName = #value; registerVariable(tableName, enumName.substr(enumName.find_last_of(':') + 1), value); }
void LuaScriptInterface::registerFunctions()
{
//doPlayerAddItem(uid, itemid, <optional: default: 1> count/subtype)
//doPlayerAddItem(cid, itemid, <optional: default: 1> count, <optional: default: 1> canDropOnMap, <optional: default: 1>subtype)
//Returns uid of the created item
lua_register(luaState, "doPlayerAddItem", LuaScriptInterface::luaDoPlayerAddItem);
//isValidUID(uid)
lua_register(luaState, "isValidUID", LuaScriptInterface::luaIsValidUID);
//isDepot(uid)
lua_register(luaState, "isDepot", LuaScriptInterface::luaIsDepot);
//isMovable(uid)
lua_register(luaState, "isMovable", LuaScriptInterface::luaIsMoveable);
//doAddContainerItem(uid, itemid, <optional> count/subtype)
lua_register(luaState, "doAddContainerItem", LuaScriptInterface::luaDoAddContainerItem);
//getDepotId(uid)
lua_register(luaState, "getDepotId", LuaScriptInterface::luaGetDepotId);
//getWorldTime()
lua_register(luaState, "getWorldTime", LuaScriptInterface::luaGetWorldTime);
//getWorldLight()
lua_register(luaState, "getWorldLight", LuaScriptInterface::luaGetWorldLight);
//setWorldLight(level, color)
lua_register(luaState, "setWorldLight", LuaScriptInterface::luaSetWorldLight);
//getWorldUpTime()
lua_register(luaState, "getWorldUpTime", LuaScriptInterface::luaGetWorldUpTime);
// getSubTypeName(subType)
lua_register(luaState, "getSubTypeName", LuaScriptInterface::luaGetSubTypeName);
//createCombatArea( {area}, <optional> {extArea} )
lua_register(luaState, "createCombatArea", LuaScriptInterface::luaCreateCombatArea);
//doAreaCombat(cid, type, pos, area, min, max, effect[, origin = ORIGIN_SPELL[, blockArmor = false[, blockShield = false[, ignoreResistances = false]]]])
lua_register(luaState, "doAreaCombat", LuaScriptInterface::luaDoAreaCombat);
//doTargetCombat(cid, target, type, min, max, effect[, origin = ORIGIN_SPELL[, blockArmor = false[, blockShield = false[, ignoreResistances = false]]]])
lua_register(luaState, "doTargetCombat", LuaScriptInterface::luaDoTargetCombat);
//doChallengeCreature(cid, target[, force = false])
lua_register(luaState, "doChallengeCreature", LuaScriptInterface::luaDoChallengeCreature);
//addEvent(callback, delay, ...)
lua_register(luaState, "addEvent", LuaScriptInterface::luaAddEvent);
//stopEvent(eventid)
lua_register(luaState, "stopEvent", LuaScriptInterface::luaStopEvent);
//saveServer()
lua_register(luaState, "saveServer", LuaScriptInterface::luaSaveServer);
//cleanMap()
lua_register(luaState, "cleanMap", LuaScriptInterface::luaCleanMap);
//debugPrint(text)
lua_register(luaState, "debugPrint", LuaScriptInterface::luaDebugPrint);
//isInWar(cid, target)
lua_register(luaState, "isInWar", LuaScriptInterface::luaIsInWar);
//getWaypointPosition(name)
lua_register(luaState, "getWaypointPositionByName", LuaScriptInterface::luaGetWaypointPositionByName);
//sendChannelMessage(channelId, type, message)
lua_register(luaState, "sendChannelMessage", LuaScriptInterface::luaSendChannelMessage);
//sendGuildChannelMessage(guildId, type, message)
lua_register(luaState, "sendGuildChannelMessage", LuaScriptInterface::luaSendGuildChannelMessage);
//isScriptsInterface()
lua_register(luaState, "isScriptsInterface", LuaScriptInterface::luaIsScriptsInterface);
#ifndef LUAJIT_VERSION
//bit operations for Lua, based on bitlib project release 24
//bit.bnot, bit.band, bit.bor, bit.bxor, bit.lshift, bit.rshift
luaL_register(luaState, "bit", LuaScriptInterface::luaBitReg);
lua_pop(luaState, 1);
#endif
//configManager table
luaL_register(luaState, "configManager", LuaScriptInterface::luaConfigManagerTable);
lua_pop(luaState, 1);
//db table
luaL_register(luaState, "db", LuaScriptInterface::luaDatabaseTable);
lua_pop(luaState, 1);
//result table
luaL_register(luaState, "result", LuaScriptInterface::luaResultTable);
lua_pop(luaState, 1);
/* New functions */
//registerClass(className, baseClass, newFunction)
//registerTable(tableName)
//registerMethod(className, functionName, function)
//registerMetaMethod(className, functionName, function)
//registerGlobalMethod(functionName, function)
//registerVariable(tableName, name, value)
//registerGlobalVariable(name, value)
//registerEnum(value)
//registerEnumIn(tableName, value)
// Enums
registerEnum(ACCOUNT_TYPE_NORMAL)
registerEnum(ACCOUNT_TYPE_TUTOR)
registerEnum(ACCOUNT_TYPE_SENIORTUTOR)
registerEnum(ACCOUNT_TYPE_GAMEMASTER)
registerEnum(ACCOUNT_TYPE_COMMUNITYMANAGER)
registerEnum(ACCOUNT_TYPE_GOD)
registerEnum(AMMO_NONE)
registerEnum(AMMO_BOLT)
registerEnum(AMMO_ARROW)
registerEnum(AMMO_SPEAR)
registerEnum(AMMO_THROWINGSTAR)
registerEnum(AMMO_THROWINGKNIFE)
registerEnum(AMMO_STONE)
registerEnum(AMMO_SNOWBALL)
registerEnum(BUG_CATEGORY_MAP)
registerEnum(BUG_CATEGORY_TYPO)
registerEnum(BUG_CATEGORY_TECHNICAL)
registerEnum(BUG_CATEGORY_OTHER)
registerEnum(CALLBACK_PARAM_LEVELMAGICVALUE)
registerEnum(CALLBACK_PARAM_SKILLVALUE)
registerEnum(CALLBACK_PARAM_TARGETTILE)
registerEnum(CALLBACK_PARAM_TARGETCREATURE)
registerEnum(COMBAT_FORMULA_UNDEFINED)
registerEnum(COMBAT_FORMULA_LEVELMAGIC)
registerEnum(COMBAT_FORMULA_SKILL)
registerEnum(COMBAT_FORMULA_DAMAGE)
registerEnum(DIRECTION_NORTH)
registerEnum(DIRECTION_EAST)
registerEnum(DIRECTION_SOUTH)
registerEnum(DIRECTION_WEST)
registerEnum(DIRECTION_SOUTHWEST)
registerEnum(DIRECTION_SOUTHEAST)
registerEnum(DIRECTION_NORTHWEST)
registerEnum(DIRECTION_NORTHEAST)
registerEnum(COMBAT_NONE)
registerEnum(COMBAT_PHYSICALDAMAGE)
registerEnum(COMBAT_ENERGYDAMAGE)
registerEnum(COMBAT_EARTHDAMAGE)
registerEnum(COMBAT_FIREDAMAGE)
registerEnum(COMBAT_UNDEFINEDDAMAGE)
registerEnum(COMBAT_LIFEDRAIN)
registerEnum(COMBAT_MANADRAIN)
registerEnum(COMBAT_HEALING)
registerEnum(COMBAT_DROWNDAMAGE)
registerEnum(COMBAT_ICEDAMAGE)
registerEnum(COMBAT_HOLYDAMAGE)
registerEnum(COMBAT_DEATHDAMAGE)
registerEnum(COMBAT_PARAM_TYPE)
registerEnum(COMBAT_PARAM_EFFECT)
registerEnum(COMBAT_PARAM_DISTANCEEFFECT)
registerEnum(COMBAT_PARAM_BLOCKSHIELD)
registerEnum(COMBAT_PARAM_BLOCKARMOR)
registerEnum(COMBAT_PARAM_TARGETCASTERORTOPMOST)
registerEnum(COMBAT_PARAM_CREATEITEM)
registerEnum(COMBAT_PARAM_AGGRESSIVE)
registerEnum(COMBAT_PARAM_DISPEL)
registerEnum(COMBAT_PARAM_USECHARGES)
registerEnum(CONDITION_NONE)
registerEnum(CONDITION_POISON)
registerEnum(CONDITION_FIRE)
registerEnum(CONDITION_ENERGY)
registerEnum(CONDITION_BLEEDING)
registerEnum(CONDITION_HASTE)
registerEnum(CONDITION_PARALYZE)
registerEnum(CONDITION_OUTFIT)
registerEnum(CONDITION_INVISIBLE)
registerEnum(CONDITION_LIGHT)
registerEnum(CONDITION_MANASHIELD)
registerEnum(CONDITION_INFIGHT)
registerEnum(CONDITION_DRUNK)
registerEnum(CONDITION_EXHAUST_WEAPON)
registerEnum(CONDITION_REGENERATION)
registerEnum(CONDITION_SOUL)
registerEnum(CONDITION_DROWN)
registerEnum(CONDITION_MUTED)
registerEnum(CONDITION_CHANNELMUTEDTICKS)
registerEnum(CONDITION_YELLTICKS)
registerEnum(CONDITION_ATTRIBUTES)
registerEnum(CONDITION_FREEZING)
registerEnum(CONDITION_DAZZLED)
registerEnum(CONDITION_CURSED)
registerEnum(CONDITION_EXHAUST_COMBAT)
registerEnum(CONDITION_EXHAUST_HEAL)
registerEnum(CONDITION_PACIFIED)
registerEnum(CONDITION_SPELLCOOLDOWN)
registerEnum(CONDITION_SPELLGROUPCOOLDOWN)
registerEnum(CONDITIONID_DEFAULT)
registerEnum(CONDITIONID_COMBAT)
registerEnum(CONDITIONID_HEAD)
registerEnum(CONDITIONID_NECKLACE)
registerEnum(CONDITIONID_BACKPACK)
registerEnum(CONDITIONID_ARMOR)
registerEnum(CONDITIONID_RIGHT)
registerEnum(CONDITIONID_LEFT)
registerEnum(CONDITIONID_LEGS)
registerEnum(CONDITIONID_FEET)
registerEnum(CONDITIONID_RING)
registerEnum(CONDITIONID_AMMO)
registerEnum(CONDITION_PARAM_OWNER)
registerEnum(CONDITION_PARAM_TICKS)
registerEnum(CONDITION_PARAM_DRUNKENNESS)
registerEnum(CONDITION_PARAM_HEALTHGAIN)
registerEnum(CONDITION_PARAM_HEALTHTICKS)
registerEnum(CONDITION_PARAM_MANAGAIN)
registerEnum(CONDITION_PARAM_MANATICKS)
registerEnum(CONDITION_PARAM_DELAYED)
registerEnum(CONDITION_PARAM_SPEED)
registerEnum(CONDITION_PARAM_LIGHT_LEVEL)
registerEnum(CONDITION_PARAM_LIGHT_COLOR)
registerEnum(CONDITION_PARAM_SOULGAIN)
registerEnum(CONDITION_PARAM_SOULTICKS)
registerEnum(CONDITION_PARAM_MINVALUE)
registerEnum(CONDITION_PARAM_MAXVALUE)
registerEnum(CONDITION_PARAM_STARTVALUE)
registerEnum(CONDITION_PARAM_TICKINTERVAL)
registerEnum(CONDITION_PARAM_FORCEUPDATE)
registerEnum(CONDITION_PARAM_SKILL_MELEE)
registerEnum(CONDITION_PARAM_SKILL_FIST)
registerEnum(CONDITION_PARAM_SKILL_CLUB)
registerEnum(CONDITION_PARAM_SKILL_SWORD)
registerEnum(CONDITION_PARAM_SKILL_AXE)
registerEnum(CONDITION_PARAM_SKILL_DISTANCE)
registerEnum(CONDITION_PARAM_SKILL_SHIELD)
registerEnum(CONDITION_PARAM_SKILL_FISHING)
registerEnum(CONDITION_PARAM_STAT_MAXHITPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAXMANAPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAGICPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAXHITPOINTSPERCENT)
registerEnum(CONDITION_PARAM_STAT_MAXMANAPOINTSPERCENT)
registerEnum(CONDITION_PARAM_STAT_MAGICPOINTSPERCENT)
registerEnum(CONDITION_PARAM_PERIODICDAMAGE)
registerEnum(CONDITION_PARAM_SKILL_MELEEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_FISTPERCENT)
registerEnum(CONDITION_PARAM_SKILL_CLUBPERCENT)
registerEnum(CONDITION_PARAM_SKILL_SWORDPERCENT)
registerEnum(CONDITION_PARAM_SKILL_AXEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_DISTANCEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_SHIELDPERCENT)
registerEnum(CONDITION_PARAM_SKILL_FISHINGPERCENT)
registerEnum(CONDITION_PARAM_BUFF_SPELL)
registerEnum(CONDITION_PARAM_SUBID)
registerEnum(CONDITION_PARAM_FIELD)
registerEnum(CONDITION_PARAM_DISABLE_DEFENSE)
registerEnum(CONDITION_PARAM_SPECIALSKILL_CRITICALHITCHANCE)
registerEnum(CONDITION_PARAM_SPECIALSKILL_CRITICALHITAMOUNT)
registerEnum(CONDITION_PARAM_SPECIALSKILL_LIFELEECHCHANCE)
registerEnum(CONDITION_PARAM_SPECIALSKILL_LIFELEECHAMOUNT)
registerEnum(CONDITION_PARAM_SPECIALSKILL_MANALEECHCHANCE)
registerEnum(CONDITION_PARAM_SPECIALSKILL_MANALEECHAMOUNT)
registerEnum(CONDITION_PARAM_AGGRESSIVE)
registerEnum(CONST_ME_NONE)
registerEnum(CONST_ME_DRAWBLOOD)
registerEnum(CONST_ME_LOSEENERGY)
registerEnum(CONST_ME_POFF)
registerEnum(CONST_ME_BLOCKHIT)
registerEnum(CONST_ME_EXPLOSIONAREA)
registerEnum(CONST_ME_EXPLOSIONHIT)
registerEnum(CONST_ME_FIREAREA)
registerEnum(CONST_ME_YELLOW_RINGS)
registerEnum(CONST_ME_GREEN_RINGS)
registerEnum(CONST_ME_HITAREA)
registerEnum(CONST_ME_TELEPORT)
registerEnum(CONST_ME_ENERGYHIT)
registerEnum(CONST_ME_MAGIC_BLUE)
registerEnum(CONST_ME_MAGIC_RED)
registerEnum(CONST_ME_MAGIC_GREEN)
registerEnum(CONST_ME_HITBYFIRE)
registerEnum(CONST_ME_HITBYPOISON)
registerEnum(CONST_ME_MORTAREA)
registerEnum(CONST_ME_SOUND_GREEN)
registerEnum(CONST_ME_SOUND_RED)
registerEnum(CONST_ME_POISONAREA)
registerEnum(CONST_ME_SOUND_YELLOW)
registerEnum(CONST_ME_SOUND_PURPLE)
registerEnum(CONST_ME_SOUND_BLUE)
registerEnum(CONST_ME_SOUND_WHITE)
registerEnum(CONST_ME_BUBBLES)
registerEnum(CONST_ME_CRAPS)
registerEnum(CONST_ME_GIFT_WRAPS)
registerEnum(CONST_ME_FIREWORK_YELLOW)
registerEnum(CONST_ME_FIREWORK_RED)
registerEnum(CONST_ME_FIREWORK_BLUE)
registerEnum(CONST_ME_STUN)
registerEnum(CONST_ME_SLEEP)
registerEnum(CONST_ME_WATERCREATURE)
registerEnum(CONST_ME_GROUNDSHAKER)
registerEnum(CONST_ME_HEARTS)
registerEnum(CONST_ME_FIREATTACK)
registerEnum(CONST_ME_ENERGYAREA)
registerEnum(CONST_ME_SMALLCLOUDS)
registerEnum(CONST_ME_HOLYDAMAGE)
registerEnum(CONST_ME_BIGCLOUDS)
registerEnum(CONST_ME_ICEAREA)
registerEnum(CONST_ME_ICETORNADO)
registerEnum(CONST_ME_ICEATTACK)
registerEnum(CONST_ME_STONES)
registerEnum(CONST_ME_SMALLPLANTS)
registerEnum(CONST_ME_CARNIPHILA)
registerEnum(CONST_ME_PURPLEENERGY)
registerEnum(CONST_ME_YELLOWENERGY)
registerEnum(CONST_ME_HOLYAREA)
registerEnum(CONST_ME_BIGPLANTS)
registerEnum(CONST_ME_CAKE)
registerEnum(CONST_ME_GIANTICE)
registerEnum(CONST_ME_WATERSPLASH)
registerEnum(CONST_ME_PLANTATTACK)
registerEnum(CONST_ME_TUTORIALARROW)
registerEnum(CONST_ME_TUTORIALSQUARE)
registerEnum(CONST_ME_MIRRORHORIZONTAL)
registerEnum(CONST_ME_MIRRORVERTICAL)
registerEnum(CONST_ME_SKULLHORIZONTAL)
registerEnum(CONST_ME_SKULLVERTICAL)
registerEnum(CONST_ME_ASSASSIN)
registerEnum(CONST_ME_STEPSHORIZONTAL)
registerEnum(CONST_ME_BLOODYSTEPS)
registerEnum(CONST_ME_STEPSVERTICAL)
registerEnum(CONST_ME_YALAHARIGHOST)
registerEnum(CONST_ME_BATS)
registerEnum(CONST_ME_SMOKE)
registerEnum(CONST_ME_INSECTS)
registerEnum(CONST_ME_DRAGONHEAD)
registerEnum(CONST_ME_ORCSHAMAN)
registerEnum(CONST_ME_ORCSHAMAN_FIRE)
registerEnum(CONST_ME_THUNDER)
registerEnum(CONST_ME_FERUMBRAS)
registerEnum(CONST_ME_CONFETTI_HORIZONTAL)
registerEnum(CONST_ME_CONFETTI_VERTICAL)
registerEnum(CONST_ME_BLACKSMOKE)
registerEnum(CONST_ME_REDSMOKE)
registerEnum(CONST_ME_YELLOWSMOKE)
registerEnum(CONST_ME_GREENSMOKE)
registerEnum(CONST_ME_PURPLESMOKE)
registerEnum(CONST_ME_EARLY_THUNDER)
registerEnum(CONST_ME_RAGIAZ_BONECAPSULE)
registerEnum(CONST_ME_CRITICAL_DAMAGE)
registerEnum(CONST_ME_PLUNGING_FISH)
registerEnum(CONST_ANI_NONE)
registerEnum(CONST_ANI_SPEAR)
registerEnum(CONST_ANI_BOLT)
registerEnum(CONST_ANI_ARROW)
registerEnum(CONST_ANI_FIRE)
registerEnum(CONST_ANI_ENERGY)
registerEnum(CONST_ANI_POISONARROW)
registerEnum(CONST_ANI_BURSTARROW)
registerEnum(CONST_ANI_THROWINGSTAR)
registerEnum(CONST_ANI_THROWINGKNIFE)
registerEnum(CONST_ANI_SMALLSTONE)
registerEnum(CONST_ANI_DEATH)
registerEnum(CONST_ANI_LARGEROCK)
registerEnum(CONST_ANI_SNOWBALL)
registerEnum(CONST_ANI_POWERBOLT)
registerEnum(CONST_ANI_POISON)
registerEnum(CONST_ANI_INFERNALBOLT)
registerEnum(CONST_ANI_HUNTINGSPEAR)
registerEnum(CONST_ANI_ENCHANTEDSPEAR)
registerEnum(CONST_ANI_REDSTAR)
registerEnum(CONST_ANI_GREENSTAR)
registerEnum(CONST_ANI_ROYALSPEAR)
registerEnum(CONST_ANI_SNIPERARROW)
registerEnum(CONST_ANI_ONYXARROW)
registerEnum(CONST_ANI_PIERCINGBOLT)
registerEnum(CONST_ANI_WHIRLWINDSWORD)
registerEnum(CONST_ANI_WHIRLWINDAXE)
registerEnum(CONST_ANI_WHIRLWINDCLUB)
registerEnum(CONST_ANI_ETHEREALSPEAR)
registerEnum(CONST_ANI_ICE)
registerEnum(CONST_ANI_EARTH)
registerEnum(CONST_ANI_HOLY)
registerEnum(CONST_ANI_SUDDENDEATH)
registerEnum(CONST_ANI_FLASHARROW)
registerEnum(CONST_ANI_FLAMMINGARROW)
registerEnum(CONST_ANI_SHIVERARROW)
registerEnum(CONST_ANI_ENERGYBALL)
registerEnum(CONST_ANI_SMALLICE)
registerEnum(CONST_ANI_SMALLHOLY)
registerEnum(CONST_ANI_SMALLEARTH)
registerEnum(CONST_ANI_EARTHARROW)
registerEnum(CONST_ANI_EXPLOSION)
registerEnum(CONST_ANI_CAKE)
registerEnum(CONST_ANI_TARSALARROW)
registerEnum(CONST_ANI_VORTEXBOLT)
registerEnum(CONST_ANI_PRISMATICBOLT)
registerEnum(CONST_ANI_CRYSTALLINEARROW)
registerEnum(CONST_ANI_DRILLBOLT)
registerEnum(CONST_ANI_ENVENOMEDARROW)
registerEnum(CONST_ANI_GLOOTHSPEAR)
registerEnum(CONST_ANI_SIMPLEARROW)
registerEnum(CONST_ANI_WEAPONTYPE)
registerEnum(CONST_PROP_BLOCKSOLID)
registerEnum(CONST_PROP_HASHEIGHT)
registerEnum(CONST_PROP_BLOCKPROJECTILE)
registerEnum(CONST_PROP_BLOCKPATH)
registerEnum(CONST_PROP_ISVERTICAL)
registerEnum(CONST_PROP_ISHORIZONTAL)
registerEnum(CONST_PROP_MOVEABLE)
registerEnum(CONST_PROP_IMMOVABLEBLOCKSOLID)
registerEnum(CONST_PROP_IMMOVABLEBLOCKPATH)
registerEnum(CONST_PROP_IMMOVABLENOFIELDBLOCKPATH)
registerEnum(CONST_PROP_NOFIELDBLOCKPATH)
registerEnum(CONST_PROP_SUPPORTHANGABLE)
registerEnum(CONST_SLOT_HEAD)
registerEnum(CONST_SLOT_NECKLACE)
registerEnum(CONST_SLOT_BACKPACK)
registerEnum(CONST_SLOT_ARMOR)
registerEnum(CONST_SLOT_RIGHT)
registerEnum(CONST_SLOT_LEFT)
registerEnum(CONST_SLOT_LEGS)
registerEnum(CONST_SLOT_FEET)
registerEnum(CONST_SLOT_RING)
registerEnum(CONST_SLOT_AMMO)
registerEnum(CREATURE_EVENT_NONE)
registerEnum(CREATURE_EVENT_LOGIN)
registerEnum(CREATURE_EVENT_LOGOUT)
registerEnum(CREATURE_EVENT_THINK)
registerEnum(CREATURE_EVENT_PREPAREDEATH)
registerEnum(CREATURE_EVENT_DEATH)
registerEnum(CREATURE_EVENT_KILL)
registerEnum(CREATURE_EVENT_ADVANCE)
registerEnum(CREATURE_EVENT_MODALWINDOW)
registerEnum(CREATURE_EVENT_TEXTEDIT)
registerEnum(CREATURE_EVENT_HEALTHCHANGE)
registerEnum(CREATURE_EVENT_MANACHANGE)
registerEnum(CREATURE_EVENT_EXTENDED_OPCODE)
registerEnum(GAME_STATE_STARTUP)
registerEnum(GAME_STATE_INIT)
registerEnum(GAME_STATE_NORMAL)
registerEnum(GAME_STATE_CLOSED)
registerEnum(GAME_STATE_SHUTDOWN)
registerEnum(GAME_STATE_CLOSING)
registerEnum(GAME_STATE_MAINTAIN)
registerEnum(MESSAGE_STATUS_CONSOLE_BLUE)
registerEnum(MESSAGE_STATUS_CONSOLE_RED)
registerEnum(MESSAGE_STATUS_DEFAULT)
registerEnum(MESSAGE_STATUS_WARNING)
registerEnum(MESSAGE_EVENT_ADVANCE)
registerEnum(MESSAGE_STATUS_SMALL)
registerEnum(MESSAGE_INFO_DESCR)
registerEnum(MESSAGE_DAMAGE_DEALT)
registerEnum(MESSAGE_DAMAGE_RECEIVED)
registerEnum(MESSAGE_HEALED)
registerEnum(MESSAGE_EXPERIENCE)
registerEnum(MESSAGE_DAMAGE_OTHERS)
registerEnum(MESSAGE_HEALED_OTHERS)
registerEnum(MESSAGE_EXPERIENCE_OTHERS)
registerEnum(MESSAGE_EVENT_DEFAULT)
registerEnum(MESSAGE_GUILD)
registerEnum(MESSAGE_PARTY_MANAGEMENT)
registerEnum(MESSAGE_PARTY)
registerEnum(MESSAGE_EVENT_ORANGE)
registerEnum(MESSAGE_STATUS_CONSOLE_ORANGE)
registerEnum(MESSAGE_LOOT)
registerEnum(CREATURETYPE_PLAYER)
registerEnum(CREATURETYPE_MONSTER)
registerEnum(CREATURETYPE_NPC)
registerEnum(CREATURETYPE_SUMMON_OWN)
registerEnum(CREATURETYPE_SUMMON_OTHERS)
registerEnum(CLIENTOS_LINUX)
registerEnum(CLIENTOS_WINDOWS)
registerEnum(CLIENTOS_FLASH)
registerEnum(CLIENTOS_OTCLIENT_LINUX)
registerEnum(CLIENTOS_OTCLIENT_WINDOWS)
registerEnum(CLIENTOS_OTCLIENT_MAC)
registerEnum(FIGHTMODE_ATTACK)
registerEnum(FIGHTMODE_BALANCED)
registerEnum(FIGHTMODE_DEFENSE)
registerEnum(ITEM_ATTRIBUTE_NONE)
registerEnum(ITEM_ATTRIBUTE_ACTIONID)
registerEnum(ITEM_ATTRIBUTE_UNIQUEID)
registerEnum(ITEM_ATTRIBUTE_DESCRIPTION)
registerEnum(ITEM_ATTRIBUTE_TEXT)
registerEnum(ITEM_ATTRIBUTE_DATE)
registerEnum(ITEM_ATTRIBUTE_WRITER)
registerEnum(ITEM_ATTRIBUTE_NAME)
registerEnum(ITEM_ATTRIBUTE_ARTICLE)
registerEnum(ITEM_ATTRIBUTE_PLURALNAME)
registerEnum(ITEM_ATTRIBUTE_WEIGHT)
registerEnum(ITEM_ATTRIBUTE_ATTACK)
registerEnum(ITEM_ATTRIBUTE_DEFENSE)
registerEnum(ITEM_ATTRIBUTE_EXTRADEFENSE)
registerEnum(ITEM_ATTRIBUTE_ARMOR)
registerEnum(ITEM_ATTRIBUTE_HITCHANCE)
registerEnum(ITEM_ATTRIBUTE_SHOOTRANGE)
registerEnum(ITEM_ATTRIBUTE_OWNER)
registerEnum(ITEM_ATTRIBUTE_DURATION)
registerEnum(ITEM_ATTRIBUTE_DECAYSTATE)
registerEnum(ITEM_ATTRIBUTE_CORPSEOWNER)
registerEnum(ITEM_ATTRIBUTE_CHARGES)
registerEnum(ITEM_ATTRIBUTE_FLUIDTYPE)
registerEnum(ITEM_ATTRIBUTE_DOORID)
registerEnum(ITEM_ATTRIBUTE_DECAYTO)
registerEnum(ITEM_ATTRIBUTE_WRAPID)
registerEnum(ITEM_ATTRIBUTE_STOREITEM)
registerEnum(ITEM_ATTRIBUTE_ATTACK_SPEED)
registerEnum(ITEM_TYPE_DEPOT)
registerEnum(ITEM_TYPE_MAILBOX)
registerEnum(ITEM_TYPE_TRASHHOLDER)
registerEnum(ITEM_TYPE_CONTAINER)
registerEnum(ITEM_TYPE_DOOR)
registerEnum(ITEM_TYPE_MAGICFIELD)
registerEnum(ITEM_TYPE_TELEPORT)
registerEnum(ITEM_TYPE_BED)
registerEnum(ITEM_TYPE_KEY)
registerEnum(ITEM_TYPE_RUNE)
registerEnum(ITEM_GROUP_GROUND)
registerEnum(ITEM_GROUP_CONTAINER)
registerEnum(ITEM_GROUP_WEAPON)
registerEnum(ITEM_GROUP_AMMUNITION)
registerEnum(ITEM_GROUP_ARMOR)
registerEnum(ITEM_GROUP_CHARGES)
registerEnum(ITEM_GROUP_TELEPORT)
registerEnum(ITEM_GROUP_MAGICFIELD)
registerEnum(ITEM_GROUP_WRITEABLE)
registerEnum(ITEM_GROUP_KEY)
registerEnum(ITEM_GROUP_SPLASH)
registerEnum(ITEM_GROUP_FLUID)
registerEnum(ITEM_GROUP_DOOR)
registerEnum(ITEM_GROUP_DEPRECATED)
registerEnum(ITEM_BROWSEFIELD)
registerEnum(ITEM_BAG)
registerEnum(ITEM_SHOPPING_BAG)
registerEnum(ITEM_GOLD_COIN)
registerEnum(ITEM_PLATINUM_COIN)
registerEnum(ITEM_CRYSTAL_COIN)
registerEnum(ITEM_AMULETOFLOSS)
registerEnum(ITEM_PARCEL)
registerEnum(ITEM_LABEL)
registerEnum(ITEM_FIREFIELD_PVP_FULL)
registerEnum(ITEM_FIREFIELD_PVP_MEDIUM)
registerEnum(ITEM_FIREFIELD_PVP_SMALL)
registerEnum(ITEM_FIREFIELD_PERSISTENT_FULL)
registerEnum(ITEM_FIREFIELD_PERSISTENT_MEDIUM)
registerEnum(ITEM_FIREFIELD_PERSISTENT_SMALL)
registerEnum(ITEM_FIREFIELD_NOPVP)
registerEnum(ITEM_POISONFIELD_PVP)
registerEnum(ITEM_POISONFIELD_PERSISTENT)
registerEnum(ITEM_POISONFIELD_NOPVP)
registerEnum(ITEM_ENERGYFIELD_PVP)
registerEnum(ITEM_ENERGYFIELD_PERSISTENT)
registerEnum(ITEM_ENERGYFIELD_NOPVP)
registerEnum(ITEM_MAGICWALL)
registerEnum(ITEM_MAGICWALL_PERSISTENT)
registerEnum(ITEM_MAGICWALL_SAFE)
registerEnum(ITEM_WILDGROWTH)
registerEnum(ITEM_WILDGROWTH_PERSISTENT)
registerEnum(ITEM_WILDGROWTH_SAFE)
registerEnum(WIELDINFO_NONE)
registerEnum(WIELDINFO_LEVEL)
registerEnum(WIELDINFO_MAGLV)
registerEnum(WIELDINFO_VOCREQ)
registerEnum(WIELDINFO_PREMIUM)
registerEnum(PlayerFlag_CannotUseCombat)
registerEnum(PlayerFlag_CannotAttackPlayer)
registerEnum(PlayerFlag_CannotAttackMonster)
registerEnum(PlayerFlag_CannotBeAttacked)
registerEnum(PlayerFlag_CanConvinceAll)
registerEnum(PlayerFlag_CanSummonAll)
registerEnum(PlayerFlag_CanIllusionAll)
registerEnum(PlayerFlag_CanSenseInvisibility)
registerEnum(PlayerFlag_IgnoredByMonsters)
registerEnum(PlayerFlag_NotGainInFight)
registerEnum(PlayerFlag_HasInfiniteMana)
registerEnum(PlayerFlag_HasInfiniteSoul)
registerEnum(PlayerFlag_HasNoExhaustion)
registerEnum(PlayerFlag_CannotUseSpells)
registerEnum(PlayerFlag_CannotPickupItem)
registerEnum(PlayerFlag_CanAlwaysLogin)
registerEnum(PlayerFlag_CanBroadcast)
registerEnum(PlayerFlag_CanEditHouses)
registerEnum(PlayerFlag_CannotBeBanned)
registerEnum(PlayerFlag_CannotBePushed)
registerEnum(PlayerFlag_HasInfiniteCapacity)
registerEnum(PlayerFlag_CanPushAllCreatures)
registerEnum(PlayerFlag_CanTalkRedPrivate)
registerEnum(PlayerFlag_CanTalkRedChannel)
registerEnum(PlayerFlag_TalkOrangeHelpChannel)
registerEnum(PlayerFlag_NotGainExperience)
registerEnum(PlayerFlag_NotGainMana)
registerEnum(PlayerFlag_NotGainHealth)
registerEnum(PlayerFlag_NotGainSkill)
registerEnum(PlayerFlag_SetMaxSpeed)
registerEnum(PlayerFlag_SpecialVIP)
registerEnum(PlayerFlag_NotGenerateLoot)
registerEnum(PlayerFlag_IgnoreProtectionZone)
registerEnum(PlayerFlag_IgnoreSpellCheck)
registerEnum(PlayerFlag_IgnoreWeaponCheck)
registerEnum(PlayerFlag_CannotBeMuted)
registerEnum(PlayerFlag_IsAlwaysPremium)
registerEnum(PLAYERSEX_FEMALE)
registerEnum(PLAYERSEX_MALE)
registerEnum(REPORT_REASON_NAMEINAPPROPRIATE)
registerEnum(REPORT_REASON_NAMEPOORFORMATTED)
registerEnum(REPORT_REASON_NAMEADVERTISING)
registerEnum(REPORT_REASON_NAMEUNFITTING)
registerEnum(REPORT_REASON_NAMERULEVIOLATION)
registerEnum(REPORT_REASON_INSULTINGSTATEMENT)
registerEnum(REPORT_REASON_SPAMMING)
registerEnum(REPORT_REASON_ADVERTISINGSTATEMENT)
registerEnum(REPORT_REASON_UNFITTINGSTATEMENT)
registerEnum(REPORT_REASON_LANGUAGESTATEMENT)
registerEnum(REPORT_REASON_DISCLOSURE)
registerEnum(REPORT_REASON_RULEVIOLATION)
registerEnum(REPORT_REASON_STATEMENT_BUGABUSE)
registerEnum(REPORT_REASON_UNOFFICIALSOFTWARE)
registerEnum(REPORT_REASON_PRETENDING)
registerEnum(REPORT_REASON_HARASSINGOWNERS)
registerEnum(REPORT_REASON_FALSEINFO)
registerEnum(REPORT_REASON_ACCOUNTSHARING)
registerEnum(REPORT_REASON_STEALINGDATA)
registerEnum(REPORT_REASON_SERVICEATTACKING)
registerEnum(REPORT_REASON_SERVICEAGREEMENT)
registerEnum(REPORT_TYPE_NAME)
registerEnum(REPORT_TYPE_STATEMENT)
registerEnum(REPORT_TYPE_BOT)
registerEnum(VOCATION_NONE)
registerEnum(SKILL_FIST)
registerEnum(SKILL_CLUB)
registerEnum(SKILL_SWORD)
registerEnum(SKILL_AXE)
registerEnum(SKILL_DISTANCE)
registerEnum(SKILL_SHIELD)
registerEnum(SKILL_FISHING)
registerEnum(SKILL_MAGLEVEL)
registerEnum(SKILL_LEVEL)
registerEnum(SPECIALSKILL_CRITICALHITCHANCE)
registerEnum(SPECIALSKILL_CRITICALHITAMOUNT)
registerEnum(SPECIALSKILL_LIFELEECHCHANCE)
registerEnum(SPECIALSKILL_LIFELEECHAMOUNT)
registerEnum(SPECIALSKILL_MANALEECHCHANCE)
registerEnum(SPECIALSKILL_MANALEECHAMOUNT)
registerEnum(SKULL_NONE)
registerEnum(SKULL_YELLOW)
registerEnum(SKULL_GREEN)
registerEnum(SKULL_WHITE)
registerEnum(SKULL_RED)
registerEnum(SKULL_BLACK)
registerEnum(SKULL_ORANGE)
registerEnum(TALKTYPE_SAY)
registerEnum(TALKTYPE_WHISPER)
registerEnum(TALKTYPE_YELL)
registerEnum(TALKTYPE_PRIVATE_FROM)
registerEnum(TALKTYPE_PRIVATE_TO)
registerEnum(TALKTYPE_CHANNEL_Y)
registerEnum(TALKTYPE_CHANNEL_O)
registerEnum(TALKTYPE_PRIVATE_NP)
registerEnum(TALKTYPE_PRIVATE_PN)
registerEnum(TALKTYPE_BROADCAST)
registerEnum(TALKTYPE_CHANNEL_R1)
registerEnum(TALKTYPE_PRIVATE_RED_FROM)
registerEnum(TALKTYPE_PRIVATE_RED_TO)
registerEnum(TALKTYPE_MONSTER_SAY)
registerEnum(TALKTYPE_MONSTER_YELL)
registerEnum(TEXTCOLOR_BLUE)
registerEnum(TEXTCOLOR_LIGHTGREEN)
registerEnum(TEXTCOLOR_LIGHTBLUE)
registerEnum(TEXTCOLOR_MAYABLUE)
registerEnum(TEXTCOLOR_DARKRED)
registerEnum(TEXTCOLOR_LIGHTGREY)
registerEnum(TEXTCOLOR_SKYBLUE)
registerEnum(TEXTCOLOR_PURPLE)
registerEnum(TEXTCOLOR_ELECTRICPURPLE)
registerEnum(TEXTCOLOR_RED)
registerEnum(TEXTCOLOR_PASTELRED)
registerEnum(TEXTCOLOR_ORANGE)
registerEnum(TEXTCOLOR_YELLOW)
registerEnum(TEXTCOLOR_WHITE_EXP)
registerEnum(TEXTCOLOR_NONE)
registerEnum(TILESTATE_NONE)
registerEnum(TILESTATE_PROTECTIONZONE)
registerEnum(TILESTATE_NOPVPZONE)
registerEnum(TILESTATE_NOLOGOUT)
registerEnum(TILESTATE_PVPZONE)
registerEnum(TILESTATE_FLOORCHANGE)
registerEnum(TILESTATE_FLOORCHANGE_DOWN)
registerEnum(TILESTATE_FLOORCHANGE_NORTH)
registerEnum(TILESTATE_FLOORCHANGE_SOUTH)
registerEnum(TILESTATE_FLOORCHANGE_EAST)
registerEnum(TILESTATE_FLOORCHANGE_WEST)
registerEnum(TILESTATE_TELEPORT)
registerEnum(TILESTATE_MAGICFIELD)
registerEnum(TILESTATE_MAILBOX)
registerEnum(TILESTATE_TRASHHOLDER)
registerEnum(TILESTATE_BED)
registerEnum(TILESTATE_DEPOT)
registerEnum(TILESTATE_BLOCKSOLID)
registerEnum(TILESTATE_BLOCKPATH)
registerEnum(TILESTATE_IMMOVABLEBLOCKSOLID)
registerEnum(TILESTATE_IMMOVABLEBLOCKPATH)
registerEnum(TILESTATE_IMMOVABLENOFIELDBLOCKPATH)
registerEnum(TILESTATE_NOFIELDBLOCKPATH)
registerEnum(TILESTATE_FLOORCHANGE_SOUTH_ALT)
registerEnum(TILESTATE_FLOORCHANGE_EAST_ALT)
registerEnum(TILESTATE_SUPPORTS_HANGABLE)
registerEnum(WEAPON_NONE)
registerEnum(WEAPON_SWORD)
registerEnum(WEAPON_CLUB)
registerEnum(WEAPON_AXE)
registerEnum(WEAPON_SHIELD)
registerEnum(WEAPON_DISTANCE)
registerEnum(WEAPON_WAND)
registerEnum(WEAPON_AMMO)
registerEnum(WORLD_TYPE_NO_PVP)
registerEnum(WORLD_TYPE_PVP)
registerEnum(WORLD_TYPE_PVP_ENFORCED)
// Use with container:addItem, container:addItemEx and possibly other functions.
registerEnum(FLAG_NOLIMIT)
registerEnum(FLAG_IGNOREBLOCKITEM)
registerEnum(FLAG_IGNOREBLOCKCREATURE)
registerEnum(FLAG_CHILDISOWNER)
registerEnum(FLAG_PATHFINDING)
registerEnum(FLAG_IGNOREFIELDDAMAGE)
registerEnum(FLAG_IGNORENOTMOVEABLE)
registerEnum(FLAG_IGNOREAUTOSTACK)
// Use with itemType:getSlotPosition
registerEnum(SLOTP_WHEREEVER)
registerEnum(SLOTP_HEAD)
registerEnum(SLOTP_NECKLACE)
registerEnum(SLOTP_BACKPACK)
registerEnum(SLOTP_ARMOR)
registerEnum(SLOTP_RIGHT)
registerEnum(SLOTP_LEFT)
registerEnum(SLOTP_LEGS)
registerEnum(SLOTP_FEET)
registerEnum(SLOTP_RING)
registerEnum(SLOTP_AMMO)
registerEnum(SLOTP_DEPOT)
registerEnum(SLOTP_TWO_HAND)
// Use with combat functions
registerEnum(ORIGIN_NONE)
registerEnum(ORIGIN_CONDITION)
registerEnum(ORIGIN_SPELL)
registerEnum(ORIGIN_MELEE)
registerEnum(ORIGIN_RANGED)
// Use with house:getAccessList, house:setAccessList
registerEnum(GUEST_LIST)
registerEnum(SUBOWNER_LIST)
// Use with npc:setSpeechBubble
registerEnum(SPEECHBUBBLE_NONE)
registerEnum(SPEECHBUBBLE_NORMAL)
registerEnum(SPEECHBUBBLE_TRADE)
registerEnum(SPEECHBUBBLE_QUEST)
registerEnum(SPEECHBUBBLE_QUESTTRADER)
// Use with player:addMapMark
registerEnum(MAPMARK_TICK)
registerEnum(MAPMARK_QUESTION)
registerEnum(MAPMARK_EXCLAMATION)
registerEnum(MAPMARK_STAR)
registerEnum(MAPMARK_CROSS)
registerEnum(MAPMARK_TEMPLE)
registerEnum(MAPMARK_KISS)
registerEnum(MAPMARK_SHOVEL)
registerEnum(MAPMARK_SWORD)
registerEnum(MAPMARK_FLAG)
registerEnum(MAPMARK_LOCK)
registerEnum(MAPMARK_BAG)
registerEnum(MAPMARK_SKULL)
registerEnum(MAPMARK_DOLLAR)
registerEnum(MAPMARK_REDNORTH)
registerEnum(MAPMARK_REDSOUTH)
registerEnum(MAPMARK_REDEAST)
registerEnum(MAPMARK_REDWEST)
registerEnum(MAPMARK_GREENNORTH)
registerEnum(MAPMARK_GREENSOUTH)
// Use with Game.getReturnMessage
registerEnum(RETURNVALUE_NOERROR)
registerEnum(RETURNVALUE_NOTPOSSIBLE)
registerEnum(RETURNVALUE_NOTENOUGHROOM)
registerEnum(RETURNVALUE_PLAYERISPZLOCKED)
registerEnum(RETURNVALUE_PLAYERISNOTINVITED)
registerEnum(RETURNVALUE_CANNOTTHROW)
registerEnum(RETURNVALUE_THEREISNOWAY)
registerEnum(RETURNVALUE_DESTINATIONOUTOFREACH)
registerEnum(RETURNVALUE_CREATUREBLOCK)
registerEnum(RETURNVALUE_NOTMOVEABLE)
registerEnum(RETURNVALUE_DROPTWOHANDEDITEM)
registerEnum(RETURNVALUE_BOTHHANDSNEEDTOBEFREE)
registerEnum(RETURNVALUE_CANONLYUSEONEWEAPON)
registerEnum(RETURNVALUE_NEEDEXCHANGE)
registerEnum(RETURNVALUE_CANNOTBEDRESSED)
registerEnum(RETURNVALUE_PUTTHISOBJECTINYOURHAND)
registerEnum(RETURNVALUE_PUTTHISOBJECTINBOTHHANDS)
registerEnum(RETURNVALUE_TOOFARAWAY)
registerEnum(RETURNVALUE_FIRSTGODOWNSTAIRS)
registerEnum(RETURNVALUE_FIRSTGOUPSTAIRS)
registerEnum(RETURNVALUE_CONTAINERNOTENOUGHROOM)
registerEnum(RETURNVALUE_NOTENOUGHCAPACITY)
registerEnum(RETURNVALUE_CANNOTPICKUP)
registerEnum(RETURNVALUE_THISISIMPOSSIBLE)
registerEnum(RETURNVALUE_DEPOTISFULL)
registerEnum(RETURNVALUE_CREATUREDOESNOTEXIST)
registerEnum(RETURNVALUE_CANNOTUSETHISOBJECT)
registerEnum(RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE)
registerEnum(RETURNVALUE_NOTREQUIREDLEVELTOUSERUNE)
registerEnum(RETURNVALUE_YOUAREALREADYTRADING)
registerEnum(RETURNVALUE_THISPLAYERISALREADYTRADING)
registerEnum(RETURNVALUE_YOUMAYNOTLOGOUTDURINGAFIGHT)
registerEnum(RETURNVALUE_DIRECTPLAYERSHOOT)
registerEnum(RETURNVALUE_NOTENOUGHLEVEL)
registerEnum(RETURNVALUE_NOTENOUGHMAGICLEVEL)
registerEnum(RETURNVALUE_NOTENOUGHMANA)
registerEnum(RETURNVALUE_NOTENOUGHSOUL)
registerEnum(RETURNVALUE_YOUAREEXHAUSTED)
registerEnum(RETURNVALUE_YOUCANNOTUSEOBJECTSTHATFAST)
registerEnum(RETURNVALUE_PLAYERISNOTREACHABLE)
registerEnum(RETURNVALUE_CANONLYUSETHISRUNEONCREATURES)
registerEnum(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKTHISPLAYER)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKAPERSONINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKAPERSONWHILEINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKTHISCREATURE)
registerEnum(RETURNVALUE_YOUCANONLYUSEITONCREATURES)
registerEnum(RETURNVALUE_CREATUREISNOTREACHABLE)
registerEnum(RETURNVALUE_TURNSECUREMODETOATTACKUNMARKEDPLAYERS)
registerEnum(RETURNVALUE_YOUNEEDPREMIUMACCOUNT)
registerEnum(RETURNVALUE_YOUNEEDTOLEARNTHISSPELL)
registerEnum(RETURNVALUE_YOURVOCATIONCANNOTUSETHISSPELL)
registerEnum(RETURNVALUE_YOUNEEDAWEAPONTOUSETHISSPELL)
registerEnum(RETURNVALUE_PLAYERISPZLOCKEDLEAVEPVPZONE)
registerEnum(RETURNVALUE_PLAYERISPZLOCKEDENTERPVPZONE)
registerEnum(RETURNVALUE_ACTIONNOTPERMITTEDINANOPVPZONE)
registerEnum(RETURNVALUE_YOUCANNOTLOGOUTHERE)
registerEnum(RETURNVALUE_YOUNEEDAMAGICITEMTOCASTSPELL)
registerEnum(RETURNVALUE_CANNOTCONJUREITEMHERE)
registerEnum(RETURNVALUE_YOUNEEDTOSPLITYOURSPEARS)
registerEnum(RETURNVALUE_NAMEISTOOAMBIGUOUS)
registerEnum(RETURNVALUE_CANONLYUSEONESHIELD)
registerEnum(RETURNVALUE_NOPARTYMEMBERSINRANGE)
registerEnum(RETURNVALUE_YOUARENOTTHEOWNER)
registerEnum(RETURNVALUE_TRADEPLAYERFARAWAY)
registerEnum(RETURNVALUE_YOUDONTOWNTHISHOUSE)
registerEnum(RETURNVALUE_TRADEPLAYERALREADYOWNSAHOUSE)
registerEnum(RETURNVALUE_TRADEPLAYERHIGHESTBIDDER)
registerEnum(RETURNVALUE_YOUCANNOTTRADETHISHOUSE)
registerEnum(RETURNVALUE_YOUDONTHAVEREQUIREDPROFESSION)
registerEnum(RETURNVALUE_YOUCANNOTUSETHISBED)
registerEnum(RELOAD_TYPE_ALL)
registerEnum(RELOAD_TYPE_ACTIONS)
registerEnum(RELOAD_TYPE_CHAT)
registerEnum(RELOAD_TYPE_CONFIG)
registerEnum(RELOAD_TYPE_CREATURESCRIPTS)
registerEnum(RELOAD_TYPE_EVENTS)
registerEnum(RELOAD_TYPE_GLOBAL)
registerEnum(RELOAD_TYPE_GLOBALEVENTS)
registerEnum(RELOAD_TYPE_ITEMS)
registerEnum(RELOAD_TYPE_MONSTERS)
registerEnum(RELOAD_TYPE_MOUNTS)
registerEnum(RELOAD_TYPE_MOVEMENTS)
registerEnum(RELOAD_TYPE_NPCS)
registerEnum(RELOAD_TYPE_QUESTS)
registerEnum(RELOAD_TYPE_RAIDS)
registerEnum(RELOAD_TYPE_SCRIPTS)
registerEnum(RELOAD_TYPE_SPELLS)
registerEnum(RELOAD_TYPE_TALKACTIONS)
registerEnum(RELOAD_TYPE_WEAPONS)
registerEnum(ZONE_PROTECTION)
registerEnum(ZONE_NOPVP)
registerEnum(ZONE_PVP)
registerEnum(ZONE_NOLOGOUT)
registerEnum(ZONE_NORMAL)
registerEnum(MAX_LOOTCHANCE)
registerEnum(SPELL_INSTANT)
registerEnum(SPELL_RUNE)
registerEnum(MONSTERS_EVENT_THINK)
registerEnum(MONSTERS_EVENT_APPEAR)
registerEnum(MONSTERS_EVENT_DISAPPEAR)
registerEnum(MONSTERS_EVENT_MOVE)
registerEnum(MONSTERS_EVENT_SAY)
// _G
registerGlobalVariable("INDEX_WHEREEVER", INDEX_WHEREEVER);
registerGlobalBoolean("VIRTUAL_PARENT", true);
registerGlobalMethod("isType", LuaScriptInterface::luaIsType);
registerGlobalMethod("rawgetmetatable", LuaScriptInterface::luaRawGetMetatable);
// configKeys
registerTable("configKeys");
registerEnumIn("configKeys", ConfigManager::ALLOW_CHANGEOUTFIT)
registerEnumIn("configKeys", ConfigManager::ONE_PLAYER_ON_ACCOUNT)
registerEnumIn("configKeys", ConfigManager::AIMBOT_HOTKEY_ENABLED)
registerEnumIn("configKeys", ConfigManager::REMOVE_RUNE_CHARGES)
registerEnumIn("configKeys", ConfigManager::REMOVE_WEAPON_AMMO)
registerEnumIn("configKeys", ConfigManager::REMOVE_WEAPON_CHARGES)
registerEnumIn("configKeys", ConfigManager::REMOVE_POTION_CHARGES)
registerEnumIn("configKeys", ConfigManager::EXPERIENCE_FROM_PLAYERS)
registerEnumIn("configKeys", ConfigManager::FREE_PREMIUM)
registerEnumIn("configKeys", ConfigManager::REPLACE_KICK_ON_LOGIN)
registerEnumIn("configKeys", ConfigManager::ALLOW_CLONES)
registerEnumIn("configKeys", ConfigManager::BIND_ONLY_GLOBAL_ADDRESS)
registerEnumIn("configKeys", ConfigManager::OPTIMIZE_DATABASE)
registerEnumIn("configKeys", ConfigManager::MARKET_PREMIUM)
registerEnumIn("configKeys", ConfigManager::EMOTE_SPELLS)
registerEnumIn("configKeys", ConfigManager::STAMINA_SYSTEM)
registerEnumIn("configKeys", ConfigManager::WARN_UNSAFE_SCRIPTS)
registerEnumIn("configKeys", ConfigManager::CONVERT_UNSAFE_SCRIPTS)
registerEnumIn("configKeys", ConfigManager::CLASSIC_EQUIPMENT_SLOTS)
registerEnumIn("configKeys", ConfigManager::CLASSIC_ATTACK_SPEED)
registerEnumIn("configKeys", ConfigManager::SERVER_SAVE_NOTIFY_MESSAGE)
registerEnumIn("configKeys", ConfigManager::SERVER_SAVE_NOTIFY_DURATION)
registerEnumIn("configKeys", ConfigManager::SERVER_SAVE_CLEAN_MAP)
registerEnumIn("configKeys", ConfigManager::SERVER_SAVE_CLOSE)
registerEnumIn("configKeys", ConfigManager::SERVER_SAVE_SHUTDOWN)
registerEnumIn("configKeys", ConfigManager::ONLINE_OFFLINE_CHARLIST)
registerEnumIn("configKeys", ConfigManager::LUA_ITEM_DESC)
registerEnumIn("configKeys", ConfigManager::MAP_NAME)
registerEnumIn("configKeys", ConfigManager::HOUSE_RENT_PERIOD)
registerEnumIn("configKeys", ConfigManager::SERVER_NAME)
registerEnumIn("configKeys", ConfigManager::OWNER_NAME)
registerEnumIn("configKeys", ConfigManager::OWNER_EMAIL)
registerEnumIn("configKeys", ConfigManager::URL)
registerEnumIn("configKeys", ConfigManager::LOCATION)
registerEnumIn("configKeys", ConfigManager::IP)
registerEnumIn("configKeys", ConfigManager::MOTD)
registerEnumIn("configKeys", ConfigManager::WORLD_TYPE)
registerEnumIn("configKeys", ConfigManager::MYSQL_HOST)
registerEnumIn("configKeys", ConfigManager::MYSQL_USER)
registerEnumIn("configKeys", ConfigManager::MYSQL_PASS)
registerEnumIn("configKeys", ConfigManager::MYSQL_DB)
registerEnumIn("configKeys", ConfigManager::MYSQL_SOCK)
registerEnumIn("configKeys", ConfigManager::DEFAULT_PRIORITY)
registerEnumIn("configKeys", ConfigManager::MAP_AUTHOR)
registerEnumIn("configKeys", ConfigManager::SQL_PORT)
registerEnumIn("configKeys", ConfigManager::MAX_PLAYERS)
registerEnumIn("configKeys", ConfigManager::PZ_LOCKED)
registerEnumIn("configKeys", ConfigManager::DEFAULT_DESPAWNRANGE)
registerEnumIn("configKeys", ConfigManager::DEFAULT_DESPAWNRADIUS)
registerEnumIn("configKeys", ConfigManager::DEFAULT_WALKTOSPAWNRADIUS)
registerEnumIn("configKeys", ConfigManager::REMOVE_ON_DESPAWN)
registerEnumIn("configKeys", ConfigManager::RATE_EXPERIENCE)
registerEnumIn("configKeys", ConfigManager::RATE_SKILL)
registerEnumIn("configKeys", ConfigManager::RATE_LOOT)
registerEnumIn("configKeys", ConfigManager::RATE_MAGIC)
registerEnumIn("configKeys", ConfigManager::RATE_SPAWN)
registerEnumIn("configKeys", ConfigManager::HOUSE_PRICE)
registerEnumIn("configKeys", ConfigManager::KILLS_TO_RED)
registerEnumIn("configKeys", ConfigManager::KILLS_TO_BLACK)
registerEnumIn("configKeys", ConfigManager::MAX_MESSAGEBUFFER)
registerEnumIn("configKeys", ConfigManager::ACTIONS_DELAY_INTERVAL)
registerEnumIn("configKeys", ConfigManager::EX_ACTIONS_DELAY_INTERVAL)
registerEnumIn("configKeys", ConfigManager::KICK_AFTER_MINUTES)
registerEnumIn("configKeys", ConfigManager::PROTECTION_LEVEL)
registerEnumIn("configKeys", ConfigManager::DEATH_LOSE_PERCENT)
registerEnumIn("configKeys", ConfigManager::STATUSQUERY_TIMEOUT)
registerEnumIn("configKeys", ConfigManager::FRAG_TIME)
registerEnumIn("configKeys", ConfigManager::WHITE_SKULL_TIME)
registerEnumIn("configKeys", ConfigManager::GAME_PORT)
registerEnumIn("configKeys", ConfigManager::LOGIN_PORT)
registerEnumIn("configKeys", ConfigManager::STATUS_PORT)
registerEnumIn("configKeys", ConfigManager::STAIRHOP_DELAY)
registerEnumIn("configKeys", ConfigManager::MARKET_OFFER_DURATION)
registerEnumIn("configKeys", ConfigManager::CHECK_EXPIRED_MARKET_OFFERS_EACH_MINUTES)
registerEnumIn("configKeys", ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER)
registerEnumIn("configKeys", ConfigManager::EXP_FROM_PLAYERS_LEVEL_RANGE)
registerEnumIn("configKeys", ConfigManager::MAX_PACKETS_PER_SECOND)
registerEnumIn("configKeys", ConfigManager::PLAYER_CONSOLE_LOGS)
// os
registerMethod("os", "mtime", LuaScriptInterface::luaSystemTime);
// table
registerMethod("table", "create", LuaScriptInterface::luaTableCreate);
registerMethod("table", "pack", LuaScriptInterface::luaTablePack);
// Game
registerTable("Game");
registerMethod("Game", "getSpectators", LuaScriptInterface::luaGameGetSpectators);
registerMethod("Game", "getPlayers", LuaScriptInterface::luaGameGetPlayers);
registerMethod("Game", "loadMap", LuaScriptInterface::luaGameLoadMap);
registerMethod("Game", "getExperienceStage", LuaScriptInterface::luaGameGetExperienceStage);
registerMethod("Game", "getExperienceForLevel", LuaScriptInterface::luaGameGetExperienceForLevel);
registerMethod("Game", "getMonsterCount", LuaScriptInterface::luaGameGetMonsterCount);
registerMethod("Game", "getPlayerCount", LuaScriptInterface::luaGameGetPlayerCount);
registerMethod("Game", "getNpcCount", LuaScriptInterface::luaGameGetNpcCount);
registerMethod("Game", "getMonsterTypes", LuaScriptInterface::luaGameGetMonsterTypes);
registerMethod("Game", "getTowns", LuaScriptInterface::luaGameGetTowns);
registerMethod("Game", "getHouses", LuaScriptInterface::luaGameGetHouses);
registerMethod("Game", "getGameState", LuaScriptInterface::luaGameGetGameState);
registerMethod("Game", "setGameState", LuaScriptInterface::luaGameSetGameState);
registerMethod("Game", "getWorldType", LuaScriptInterface::luaGameGetWorldType);
registerMethod("Game", "setWorldType", LuaScriptInterface::luaGameSetWorldType);
registerMethod("Game", "getItemAttributeByName", LuaScriptInterface::luaGameGetItemAttributeByName);
registerMethod("Game", "getReturnMessage", LuaScriptInterface::luaGameGetReturnMessage);
registerMethod("Game", "createItem", LuaScriptInterface::luaGameCreateItem);
registerMethod("Game", "createContainer", LuaScriptInterface::luaGameCreateContainer);
registerMethod("Game", "createMonster", LuaScriptInterface::luaGameCreateMonster);
registerMethod("Game", "createNpc", LuaScriptInterface::luaGameCreateNpc);
registerMethod("Game", "createTile", LuaScriptInterface::luaGameCreateTile);
registerMethod("Game", "createMonsterType", LuaScriptInterface::luaGameCreateMonsterType);
registerMethod("Game", "startRaid", LuaScriptInterface::luaGameStartRaid);
registerMethod("Game", "getClientVersion", LuaScriptInterface::luaGameGetClientVersion);
registerMethod("Game", "reload", LuaScriptInterface::luaGameReload);
registerMethod("Game", "getAccountStorageValue", LuaScriptInterface::luaGameGetAccountStorageValue);
registerMethod("Game", "setAccountStorageValue", LuaScriptInterface::luaGameSetAccountStorageValue);
registerMethod("Game", "saveAccountStorageValues", LuaScriptInterface::luaGameSaveAccountStorageValues);
// Variant
registerClass("Variant", "", LuaScriptInterface::luaVariantCreate);
registerMethod("Variant", "getNumber", LuaScriptInterface::luaVariantGetNumber);
registerMethod("Variant", "getString", LuaScriptInterface::luaVariantGetString);
registerMethod("Variant", "getPosition", LuaScriptInterface::luaVariantGetPosition);
// Position
registerClass("Position", "", LuaScriptInterface::luaPositionCreate);
registerMetaMethod("Position", "__add", LuaScriptInterface::luaPositionAdd);
registerMetaMethod("Position", "__sub", LuaScriptInterface::luaPositionSub);
registerMetaMethod("Position", "__eq", LuaScriptInterface::luaPositionCompare);
registerMethod("Position", "getDistance", LuaScriptInterface::luaPositionGetDistance);
registerMethod("Position", "isSightClear", LuaScriptInterface::luaPositionIsSightClear);
registerMethod("Position", "sendMagicEffect", LuaScriptInterface::luaPositionSendMagicEffect);
registerMethod("Position", "sendDistanceEffect", LuaScriptInterface::luaPositionSendDistanceEffect);
// Tile
registerClass("Tile", "", LuaScriptInterface::luaTileCreate);
registerMetaMethod("Tile", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Tile", "remove", LuaScriptInterface::luaTileRemove);
registerMethod("Tile", "getPosition", LuaScriptInterface::luaTileGetPosition);
registerMethod("Tile", "getGround", LuaScriptInterface::luaTileGetGround);
registerMethod("Tile", "getThing", LuaScriptInterface::luaTileGetThing);
registerMethod("Tile", "getThingCount", LuaScriptInterface::luaTileGetThingCount);
registerMethod("Tile", "getTopVisibleThing", LuaScriptInterface::luaTileGetTopVisibleThing);
registerMethod("Tile", "getTopTopItem", LuaScriptInterface::luaTileGetTopTopItem);
registerMethod("Tile", "getTopDownItem", LuaScriptInterface::luaTileGetTopDownItem);
registerMethod("Tile", "getFieldItem", LuaScriptInterface::luaTileGetFieldItem);
registerMethod("Tile", "getItemById", LuaScriptInterface::luaTileGetItemById);
registerMethod("Tile", "getItemByType", LuaScriptInterface::luaTileGetItemByType);
registerMethod("Tile", "getItemByTopOrder", LuaScriptInterface::luaTileGetItemByTopOrder);
registerMethod("Tile", "getItemCountById", LuaScriptInterface::luaTileGetItemCountById);
registerMethod("Tile", "getBottomCreature", LuaScriptInterface::luaTileGetBottomCreature);
registerMethod("Tile", "getTopCreature", LuaScriptInterface::luaTileGetTopCreature);
registerMethod("Tile", "getBottomVisibleCreature", LuaScriptInterface::luaTileGetBottomVisibleCreature);
registerMethod("Tile", "getTopVisibleCreature", LuaScriptInterface::luaTileGetTopVisibleCreature);
registerMethod("Tile", "getItems", LuaScriptInterface::luaTileGetItems);
registerMethod("Tile", "getItemCount", LuaScriptInterface::luaTileGetItemCount);
registerMethod("Tile", "getDownItemCount", LuaScriptInterface::luaTileGetDownItemCount);
registerMethod("Tile", "getTopItemCount", LuaScriptInterface::luaTileGetTopItemCount);
registerMethod("Tile", "getCreatures", LuaScriptInterface::luaTileGetCreatures);
registerMethod("Tile", "getCreatureCount", LuaScriptInterface::luaTileGetCreatureCount);
registerMethod("Tile", "getThingIndex", LuaScriptInterface::luaTileGetThingIndex);
registerMethod("Tile", "hasProperty", LuaScriptInterface::luaTileHasProperty);
registerMethod("Tile", "hasFlag", LuaScriptInterface::luaTileHasFlag);
registerMethod("Tile", "queryAdd", LuaScriptInterface::luaTileQueryAdd);
registerMethod("Tile", "addItem", LuaScriptInterface::luaTileAddItem);
registerMethod("Tile", "addItemEx", LuaScriptInterface::luaTileAddItemEx);
registerMethod("Tile", "getHouse", LuaScriptInterface::luaTileGetHouse);
// NetworkMessage
registerClass("NetworkMessage", "", LuaScriptInterface::luaNetworkMessageCreate);
registerMetaMethod("NetworkMessage", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("NetworkMessage", "__gc", LuaScriptInterface::luaNetworkMessageDelete);
registerMethod("NetworkMessage", "delete", LuaScriptInterface::luaNetworkMessageDelete);
registerMethod("NetworkMessage", "getByte", LuaScriptInterface::luaNetworkMessageGetByte);
registerMethod("NetworkMessage", "getU16", LuaScriptInterface::luaNetworkMessageGetU16);
registerMethod("NetworkMessage", "getU32", LuaScriptInterface::luaNetworkMessageGetU32);
registerMethod("NetworkMessage", "getU64", LuaScriptInterface::luaNetworkMessageGetU64);
registerMethod("NetworkMessage", "getString", LuaScriptInterface::luaNetworkMessageGetString);
registerMethod("NetworkMessage", "getPosition", LuaScriptInterface::luaNetworkMessageGetPosition);
registerMethod("NetworkMessage", "addByte", LuaScriptInterface::luaNetworkMessageAddByte);
registerMethod("NetworkMessage", "addU16", LuaScriptInterface::luaNetworkMessageAddU16);
registerMethod("NetworkMessage", "addU32", LuaScriptInterface::luaNetworkMessageAddU32);
registerMethod("NetworkMessage", "addU64", LuaScriptInterface::luaNetworkMessageAddU64);
registerMethod("NetworkMessage", "addString", LuaScriptInterface::luaNetworkMessageAddString);
registerMethod("NetworkMessage", "addPosition", LuaScriptInterface::luaNetworkMessageAddPosition);
registerMethod("NetworkMessage", "addDouble", LuaScriptInterface::luaNetworkMessageAddDouble);
registerMethod("NetworkMessage", "addItem", LuaScriptInterface::luaNetworkMessageAddItem);
registerMethod("NetworkMessage", "addItemId", LuaScriptInterface::luaNetworkMessageAddItemId);
registerMethod("NetworkMessage", "reset", LuaScriptInterface::luaNetworkMessageReset);
registerMethod("NetworkMessage", "seek", LuaScriptInterface::luaNetworkMessageSeek);
registerMethod("NetworkMessage", "tell", LuaScriptInterface::luaNetworkMessageTell);
registerMethod("NetworkMessage", "len", LuaScriptInterface::luaNetworkMessageLength);
registerMethod("NetworkMessage", "skipBytes", LuaScriptInterface::luaNetworkMessageSkipBytes);
registerMethod("NetworkMessage", "sendToPlayer", LuaScriptInterface::luaNetworkMessageSendToPlayer);
// ModalWindow
registerClass("ModalWindow", "", LuaScriptInterface::luaModalWindowCreate);
registerMetaMethod("ModalWindow", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("ModalWindow", "__gc", LuaScriptInterface::luaModalWindowDelete);
registerMethod("ModalWindow", "delete", LuaScriptInterface::luaModalWindowDelete);
registerMethod("ModalWindow", "getId", LuaScriptInterface::luaModalWindowGetId);
registerMethod("ModalWindow", "getTitle", LuaScriptInterface::luaModalWindowGetTitle);
registerMethod("ModalWindow", "getMessage", LuaScriptInterface::luaModalWindowGetMessage);
registerMethod("ModalWindow", "setTitle", LuaScriptInterface::luaModalWindowSetTitle);
registerMethod("ModalWindow", "setMessage", LuaScriptInterface::luaModalWindowSetMessage);
registerMethod("ModalWindow", "getButtonCount", LuaScriptInterface::luaModalWindowGetButtonCount);
registerMethod("ModalWindow", "getChoiceCount", LuaScriptInterface::luaModalWindowGetChoiceCount);
registerMethod("ModalWindow", "addButton", LuaScriptInterface::luaModalWindowAddButton);
registerMethod("ModalWindow", "addChoice", LuaScriptInterface::luaModalWindowAddChoice);
registerMethod("ModalWindow", "getDefaultEnterButton", LuaScriptInterface::luaModalWindowGetDefaultEnterButton);
registerMethod("ModalWindow", "setDefaultEnterButton", LuaScriptInterface::luaModalWindowSetDefaultEnterButton);
registerMethod("ModalWindow", "getDefaultEscapeButton", LuaScriptInterface::luaModalWindowGetDefaultEscapeButton);
registerMethod("ModalWindow", "setDefaultEscapeButton", LuaScriptInterface::luaModalWindowSetDefaultEscapeButton);
registerMethod("ModalWindow", "hasPriority", LuaScriptInterface::luaModalWindowHasPriority);
registerMethod("ModalWindow", "setPriority", LuaScriptInterface::luaModalWindowSetPriority);
registerMethod("ModalWindow", "sendToPlayer", LuaScriptInterface::luaModalWindowSendToPlayer);
// Item
registerClass("Item", "", LuaScriptInterface::luaItemCreate);
registerMetaMethod("Item", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Item", "isItem", LuaScriptInterface::luaItemIsItem);
registerMethod("Item", "getParent", LuaScriptInterface::luaItemGetParent);
registerMethod("Item", "getTopParent", LuaScriptInterface::luaItemGetTopParent);
registerMethod("Item", "getId", LuaScriptInterface::luaItemGetId);
registerMethod("Item", "clone", LuaScriptInterface::luaItemClone);
registerMethod("Item", "split", LuaScriptInterface::luaItemSplit);
registerMethod("Item", "remove", LuaScriptInterface::luaItemRemove);
registerMethod("Item", "getUniqueId", LuaScriptInterface::luaItemGetUniqueId);
registerMethod("Item", "getActionId", LuaScriptInterface::luaItemGetActionId);
registerMethod("Item", "setActionId", LuaScriptInterface::luaItemSetActionId);
registerMethod("Item", "getCount", LuaScriptInterface::luaItemGetCount);
registerMethod("Item", "getCharges", LuaScriptInterface::luaItemGetCharges);
registerMethod("Item", "getFluidType", LuaScriptInterface::luaItemGetFluidType);
registerMethod("Item", "getWeight", LuaScriptInterface::luaItemGetWeight);
registerMethod("Item", "getSubType", LuaScriptInterface::luaItemGetSubType);
registerMethod("Item", "getName", LuaScriptInterface::luaItemGetName);
registerMethod("Item", "getPluralName", LuaScriptInterface::luaItemGetPluralName);
registerMethod("Item", "getArticle", LuaScriptInterface::luaItemGetArticle);
registerMethod("Item", "getPosition", LuaScriptInterface::luaItemGetPosition);
registerMethod("Item", "getTile", LuaScriptInterface::luaItemGetTile);
registerMethod("Item", "hasAttribute", LuaScriptInterface::luaItemHasAttribute);
registerMethod("Item", "getAttribute", LuaScriptInterface::luaItemGetAttribute);
registerMethod("Item", "setAttribute", LuaScriptInterface::luaItemSetAttribute);
registerMethod("Item", "removeAttribute", LuaScriptInterface::luaItemRemoveAttribute);
registerMethod("Item", "getCustomAttribute", LuaScriptInterface::luaItemGetCustomAttribute);
registerMethod("Item", "setCustomAttribute", LuaScriptInterface::luaItemSetCustomAttribute);
registerMethod("Item", "removeCustomAttribute", LuaScriptInterface::luaItemRemoveCustomAttribute);
registerMethod("Item", "moveTo", LuaScriptInterface::luaItemMoveTo);
registerMethod("Item", "transform", LuaScriptInterface::luaItemTransform);
registerMethod("Item", "decay", LuaScriptInterface::luaItemDecay);
registerMethod("Item", "getDescription", LuaScriptInterface::luaItemGetDescription);
registerMethod("Item", "getSpecialDescription", LuaScriptInterface::luaItemGetSpecialDescription);
registerMethod("Item", "hasProperty", LuaScriptInterface::luaItemHasProperty);
registerMethod("Item", "isLoadedFromMap", LuaScriptInterface::luaItemIsLoadedFromMap);
registerMethod("Item", "setStoreItem", LuaScriptInterface::luaItemSetStoreItem);
registerMethod("Item", "isStoreItem", LuaScriptInterface::luaItemIsStoreItem);
// Container
registerClass("Container", "Item", LuaScriptInterface::luaContainerCreate);
registerMetaMethod("Container", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Container", "getSize", LuaScriptInterface::luaContainerGetSize);
registerMethod("Container", "getCapacity", LuaScriptInterface::luaContainerGetCapacity);
registerMethod("Container", "getEmptySlots", LuaScriptInterface::luaContainerGetEmptySlots);
registerMethod("Container", "getContentDescription", LuaScriptInterface::luaContainerGetContentDescription);
registerMethod("Container", "getItems", LuaScriptInterface::luaContainerGetItems);
registerMethod("Container", "getItemHoldingCount", LuaScriptInterface::luaContainerGetItemHoldingCount);
registerMethod("Container", "getItemCountById", LuaScriptInterface::luaContainerGetItemCountById);
registerMethod("Container", "getItem", LuaScriptInterface::luaContainerGetItem);
registerMethod("Container", "hasItem", LuaScriptInterface::luaContainerHasItem);
registerMethod("Container", "addItem", LuaScriptInterface::luaContainerAddItem);
registerMethod("Container", "addItemEx", LuaScriptInterface::luaContainerAddItemEx);
registerMethod("Container", "getCorpseOwner", LuaScriptInterface::luaContainerGetCorpseOwner);
// Teleport
registerClass("Teleport", "Item", LuaScriptInterface::luaTeleportCreate);
registerMetaMethod("Teleport", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Teleport", "getDestination", LuaScriptInterface::luaTeleportGetDestination);
registerMethod("Teleport", "setDestination", LuaScriptInterface::luaTeleportSetDestination);
// Creature
registerClass("Creature", "", LuaScriptInterface::luaCreatureCreate);
registerMetaMethod("Creature", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Creature", "getEvents", LuaScriptInterface::luaCreatureGetEvents);
registerMethod("Creature", "registerEvent", LuaScriptInterface::luaCreatureRegisterEvent);
registerMethod("Creature", "unregisterEvent", LuaScriptInterface::luaCreatureUnregisterEvent);
registerMethod("Creature", "isRemoved", LuaScriptInterface::luaCreatureIsRemoved);
registerMethod("Creature", "isCreature", LuaScriptInterface::luaCreatureIsCreature);
registerMethod("Creature", "isInGhostMode", LuaScriptInterface::luaCreatureIsInGhostMode);
registerMethod("Creature", "isHealthHidden", LuaScriptInterface::luaCreatureIsHealthHidden);
registerMethod("Creature", "isMovementBlocked", LuaScriptInterface::luaCreatureIsMovementBlocked);
registerMethod("Creature", "isImmune", LuaScriptInterface::luaCreatureIsImmune);
registerMethod("Creature", "canSee", LuaScriptInterface::luaCreatureCanSee);
registerMethod("Creature", "canSeeCreature", LuaScriptInterface::luaCreatureCanSeeCreature);
registerMethod("Creature", "canSeeGhostMode", LuaScriptInterface::luaCreatureCanSeeGhostMode);
registerMethod("Creature", "canSeeInvisibility", LuaScriptInterface::luaCreatureCanSeeInvisibility);
registerMethod("Creature", "getParent", LuaScriptInterface::luaCreatureGetParent);
registerMethod("Creature", "getId", LuaScriptInterface::luaCreatureGetId);
registerMethod("Creature", "getName", LuaScriptInterface::luaCreatureGetName);
registerMethod("Creature", "getTarget", LuaScriptInterface::luaCreatureGetTarget);
registerMethod("Creature", "setTarget", LuaScriptInterface::luaCreatureSetTarget);
registerMethod("Creature", "getFollowCreature", LuaScriptInterface::luaCreatureGetFollowCreature);
registerMethod("Creature", "setFollowCreature", LuaScriptInterface::luaCreatureSetFollowCreature);
registerMethod("Creature", "getMaster", LuaScriptInterface::luaCreatureGetMaster);
registerMethod("Creature", "setMaster", LuaScriptInterface::luaCreatureSetMaster);
registerMethod("Creature", "getLight", LuaScriptInterface::luaCreatureGetLight);
registerMethod("Creature", "setLight", LuaScriptInterface::luaCreatureSetLight);
registerMethod("Creature", "getSpeed", LuaScriptInterface::luaCreatureGetSpeed);
registerMethod("Creature", "getBaseSpeed", LuaScriptInterface::luaCreatureGetBaseSpeed);
registerMethod("Creature", "changeSpeed", LuaScriptInterface::luaCreatureChangeSpeed);
registerMethod("Creature", "setDropLoot", LuaScriptInterface::luaCreatureSetDropLoot);
registerMethod("Creature", "setSkillLoss", LuaScriptInterface::luaCreatureSetSkillLoss);
registerMethod("Creature", "getPosition", LuaScriptInterface::luaCreatureGetPosition);
registerMethod("Creature", "getTile", LuaScriptInterface::luaCreatureGetTile);
registerMethod("Creature", "getDirection", LuaScriptInterface::luaCreatureGetDirection);
registerMethod("Creature", "setDirection", LuaScriptInterface::luaCreatureSetDirection);
registerMethod("Creature", "getHealth", LuaScriptInterface::luaCreatureGetHealth);
registerMethod("Creature", "setHealth", LuaScriptInterface::luaCreatureSetHealth);
registerMethod("Creature", "addHealth", LuaScriptInterface::luaCreatureAddHealth);
registerMethod("Creature", "getMaxHealth", LuaScriptInterface::luaCreatureGetMaxHealth);
registerMethod("Creature", "setMaxHealth", LuaScriptInterface::luaCreatureSetMaxHealth);
registerMethod("Creature", "setHiddenHealth", LuaScriptInterface::luaCreatureSetHiddenHealth);
registerMethod("Creature", "setMovementBlocked", LuaScriptInterface::luaCreatureSetMovementBlocked);
registerMethod("Creature", "getSkull", LuaScriptInterface::luaCreatureGetSkull);
registerMethod("Creature", "setSkull", LuaScriptInterface::luaCreatureSetSkull);
registerMethod("Creature", "getOutfit", LuaScriptInterface::luaCreatureGetOutfit);
registerMethod("Creature", "setOutfit", LuaScriptInterface::luaCreatureSetOutfit);
registerMethod("Creature", "getCondition", LuaScriptInterface::luaCreatureGetCondition);
registerMethod("Creature", "addCondition", LuaScriptInterface::luaCreatureAddCondition);
registerMethod("Creature", "removeCondition", LuaScriptInterface::luaCreatureRemoveCondition);
registerMethod("Creature", "hasCondition", LuaScriptInterface::luaCreatureHasCondition);
registerMethod("Creature", "remove", LuaScriptInterface::luaCreatureRemove);
registerMethod("Creature", "teleportTo", LuaScriptInterface::luaCreatureTeleportTo);
registerMethod("Creature", "say", LuaScriptInterface::luaCreatureSay);
registerMethod("Creature", "getDamageMap", LuaScriptInterface::luaCreatureGetDamageMap);
registerMethod("Creature", "getSummons", LuaScriptInterface::luaCreatureGetSummons);
registerMethod("Creature", "getDescription", LuaScriptInterface::luaCreatureGetDescription);
registerMethod("Creature", "getPathTo", LuaScriptInterface::luaCreatureGetPathTo);
registerMethod("Creature", "move", LuaScriptInterface::luaCreatureMove);
registerMethod("Creature", "getZone", LuaScriptInterface::luaCreatureGetZone);
// Player: userdata class derived from Creature ("Player" inherits the
// Creature metatable), exposing account, stats, inventory, guild, outfit,
// spell and client-interaction bindings to Lua scripts.
registerClass("Player", "Creature", LuaScriptInterface::luaPlayerCreate);
registerMetaMethod("Player", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Player", "isPlayer", LuaScriptInterface::luaPlayerIsPlayer);
registerMethod("Player", "getGuid", LuaScriptInterface::luaPlayerGetGuid);
registerMethod("Player", "getIp", LuaScriptInterface::luaPlayerGetIp);
registerMethod("Player", "getAccountId", LuaScriptInterface::luaPlayerGetAccountId);
registerMethod("Player", "getLastLoginSaved", LuaScriptInterface::luaPlayerGetLastLoginSaved);
registerMethod("Player", "getLastLogout", LuaScriptInterface::luaPlayerGetLastLogout);
registerMethod("Player", "getAccountType", LuaScriptInterface::luaPlayerGetAccountType);
registerMethod("Player", "setAccountType", LuaScriptInterface::luaPlayerSetAccountType);
registerMethod("Player", "getCapacity", LuaScriptInterface::luaPlayerGetCapacity);
registerMethod("Player", "setCapacity", LuaScriptInterface::luaPlayerSetCapacity);
registerMethod("Player", "getFreeCapacity", LuaScriptInterface::luaPlayerGetFreeCapacity);
registerMethod("Player", "getDepotChest", LuaScriptInterface::luaPlayerGetDepotChest);
registerMethod("Player", "getInbox", LuaScriptInterface::luaPlayerGetInbox);
registerMethod("Player", "getSkullTime", LuaScriptInterface::luaPlayerGetSkullTime);
registerMethod("Player", "setSkullTime", LuaScriptInterface::luaPlayerSetSkullTime);
registerMethod("Player", "getDeathPenalty", LuaScriptInterface::luaPlayerGetDeathPenalty);
registerMethod("Player", "getExperience", LuaScriptInterface::luaPlayerGetExperience);
registerMethod("Player", "addExperience", LuaScriptInterface::luaPlayerAddExperience);
registerMethod("Player", "removeExperience", LuaScriptInterface::luaPlayerRemoveExperience);
registerMethod("Player", "getLevel", LuaScriptInterface::luaPlayerGetLevel);
registerMethod("Player", "getMagicLevel", LuaScriptInterface::luaPlayerGetMagicLevel);
registerMethod("Player", "getBaseMagicLevel", LuaScriptInterface::luaPlayerGetBaseMagicLevel);
registerMethod("Player", "getMana", LuaScriptInterface::luaPlayerGetMana);
registerMethod("Player", "addMana", LuaScriptInterface::luaPlayerAddMana);
registerMethod("Player", "getMaxMana", LuaScriptInterface::luaPlayerGetMaxMana);
registerMethod("Player", "setMaxMana", LuaScriptInterface::luaPlayerSetMaxMana);
registerMethod("Player", "getManaSpent", LuaScriptInterface::luaPlayerGetManaSpent);
registerMethod("Player", "addManaSpent", LuaScriptInterface::luaPlayerAddManaSpent);
registerMethod("Player", "removeManaSpent", LuaScriptInterface::luaPlayerRemoveManaSpent);
registerMethod("Player", "getBaseMaxHealth", LuaScriptInterface::luaPlayerGetBaseMaxHealth);
registerMethod("Player", "getBaseMaxMana", LuaScriptInterface::luaPlayerGetBaseMaxMana);
registerMethod("Player", "getSkillLevel", LuaScriptInterface::luaPlayerGetSkillLevel);
registerMethod("Player", "getEffectiveSkillLevel", LuaScriptInterface::luaPlayerGetEffectiveSkillLevel);
registerMethod("Player", "getSkillPercent", LuaScriptInterface::luaPlayerGetSkillPercent);
registerMethod("Player", "getSkillTries", LuaScriptInterface::luaPlayerGetSkillTries);
registerMethod("Player", "addSkillTries", LuaScriptInterface::luaPlayerAddSkillTries);
registerMethod("Player", "removeSkillTries", LuaScriptInterface::luaPlayerRemoveSkillTries);
registerMethod("Player", "getSpecialSkill", LuaScriptInterface::luaPlayerGetSpecialSkill);
registerMethod("Player", "addSpecialSkill", LuaScriptInterface::luaPlayerAddSpecialSkill);
registerMethod("Player", "addOfflineTrainingTime", LuaScriptInterface::luaPlayerAddOfflineTrainingTime);
registerMethod("Player", "getOfflineTrainingTime", LuaScriptInterface::luaPlayerGetOfflineTrainingTime);
registerMethod("Player", "removeOfflineTrainingTime", LuaScriptInterface::luaPlayerRemoveOfflineTrainingTime);
registerMethod("Player", "addOfflineTrainingTries", LuaScriptInterface::luaPlayerAddOfflineTrainingTries);
registerMethod("Player", "getOfflineTrainingSkill", LuaScriptInterface::luaPlayerGetOfflineTrainingSkill);
registerMethod("Player", "setOfflineTrainingSkill", LuaScriptInterface::luaPlayerSetOfflineTrainingSkill);
registerMethod("Player", "getItemCount", LuaScriptInterface::luaPlayerGetItemCount);
registerMethod("Player", "getItemById", LuaScriptInterface::luaPlayerGetItemById);
registerMethod("Player", "getVocation", LuaScriptInterface::luaPlayerGetVocation);
registerMethod("Player", "setVocation", LuaScriptInterface::luaPlayerSetVocation);
registerMethod("Player", "getSex", LuaScriptInterface::luaPlayerGetSex);
registerMethod("Player", "setSex", LuaScriptInterface::luaPlayerSetSex);
registerMethod("Player", "getTown", LuaScriptInterface::luaPlayerGetTown);
registerMethod("Player", "setTown", LuaScriptInterface::luaPlayerSetTown);
registerMethod("Player", "getGuild", LuaScriptInterface::luaPlayerGetGuild);
registerMethod("Player", "setGuild", LuaScriptInterface::luaPlayerSetGuild);
registerMethod("Player", "getGuildLevel", LuaScriptInterface::luaPlayerGetGuildLevel);
registerMethod("Player", "setGuildLevel", LuaScriptInterface::luaPlayerSetGuildLevel);
registerMethod("Player", "getGuildNick", LuaScriptInterface::luaPlayerGetGuildNick);
registerMethod("Player", "setGuildNick", LuaScriptInterface::luaPlayerSetGuildNick);
registerMethod("Player", "getGroup", LuaScriptInterface::luaPlayerGetGroup);
registerMethod("Player", "setGroup", LuaScriptInterface::luaPlayerSetGroup);
registerMethod("Player", "getStamina", LuaScriptInterface::luaPlayerGetStamina);
registerMethod("Player", "setStamina", LuaScriptInterface::luaPlayerSetStamina);
registerMethod("Player", "getSoul", LuaScriptInterface::luaPlayerGetSoul);
registerMethod("Player", "addSoul", LuaScriptInterface::luaPlayerAddSoul);
registerMethod("Player", "getMaxSoul", LuaScriptInterface::luaPlayerGetMaxSoul);
registerMethod("Player", "getBankBalance", LuaScriptInterface::luaPlayerGetBankBalance);
registerMethod("Player", "setBankBalance", LuaScriptInterface::luaPlayerSetBankBalance);
registerMethod("Player", "getStorageValue", LuaScriptInterface::luaPlayerGetStorageValue);
registerMethod("Player", "setStorageValue", LuaScriptInterface::luaPlayerSetStorageValue);
registerMethod("Player", "addItem", LuaScriptInterface::luaPlayerAddItem);
registerMethod("Player", "addItemEx", LuaScriptInterface::luaPlayerAddItemEx);
registerMethod("Player", "removeItem", LuaScriptInterface::luaPlayerRemoveItem);
registerMethod("Player", "getMoney", LuaScriptInterface::luaPlayerGetMoney);
registerMethod("Player", "addMoney", LuaScriptInterface::luaPlayerAddMoney);
registerMethod("Player", "removeMoney", LuaScriptInterface::luaPlayerRemoveMoney);
registerMethod("Player", "showTextDialog", LuaScriptInterface::luaPlayerShowTextDialog);
registerMethod("Player", "sendTextMessage", LuaScriptInterface::luaPlayerSendTextMessage);
registerMethod("Player", "sendChannelMessage", LuaScriptInterface::luaPlayerSendChannelMessage);
registerMethod("Player", "sendPrivateMessage", LuaScriptInterface::luaPlayerSendPrivateMessage);
registerMethod("Player", "channelSay", LuaScriptInterface::luaPlayerChannelSay);
registerMethod("Player", "openChannel", LuaScriptInterface::luaPlayerOpenChannel);
registerMethod("Player", "getSlotItem", LuaScriptInterface::luaPlayerGetSlotItem);
registerMethod("Player", "getParty", LuaScriptInterface::luaPlayerGetParty);
registerMethod("Player", "addOutfit", LuaScriptInterface::luaPlayerAddOutfit);
registerMethod("Player", "addOutfitAddon", LuaScriptInterface::luaPlayerAddOutfitAddon);
registerMethod("Player", "removeOutfit", LuaScriptInterface::luaPlayerRemoveOutfit);
registerMethod("Player", "removeOutfitAddon", LuaScriptInterface::luaPlayerRemoveOutfitAddon);
registerMethod("Player", "hasOutfit", LuaScriptInterface::luaPlayerHasOutfit);
registerMethod("Player", "canWearOutfit", LuaScriptInterface::luaPlayerCanWearOutfit);
registerMethod("Player", "sendOutfitWindow", LuaScriptInterface::luaPlayerSendOutfitWindow);
registerMethod("Player", "addMount", LuaScriptInterface::luaPlayerAddMount);
registerMethod("Player", "removeMount", LuaScriptInterface::luaPlayerRemoveMount);
registerMethod("Player", "hasMount", LuaScriptInterface::luaPlayerHasMount);
registerMethod("Player", "getPremiumEndsAt", LuaScriptInterface::luaPlayerGetPremiumEndsAt);
registerMethod("Player", "setPremiumEndsAt", LuaScriptInterface::luaPlayerSetPremiumEndsAt);
registerMethod("Player", "hasBlessing", LuaScriptInterface::luaPlayerHasBlessing);
registerMethod("Player", "addBlessing", LuaScriptInterface::luaPlayerAddBlessing);
registerMethod("Player", "removeBlessing", LuaScriptInterface::luaPlayerRemoveBlessing);
registerMethod("Player", "canLearnSpell", LuaScriptInterface::luaPlayerCanLearnSpell);
registerMethod("Player", "learnSpell", LuaScriptInterface::luaPlayerLearnSpell);
registerMethod("Player", "forgetSpell", LuaScriptInterface::luaPlayerForgetSpell);
registerMethod("Player", "hasLearnedSpell", LuaScriptInterface::luaPlayerHasLearnedSpell);
registerMethod("Player", "sendTutorial", LuaScriptInterface::luaPlayerSendTutorial);
registerMethod("Player", "addMapMark", LuaScriptInterface::luaPlayerAddMapMark);
registerMethod("Player", "save", LuaScriptInterface::luaPlayerSave);
registerMethod("Player", "popupFYI", LuaScriptInterface::luaPlayerPopupFYI);
registerMethod("Player", "isPzLocked", LuaScriptInterface::luaPlayerIsPzLocked);
registerMethod("Player", "getClient", LuaScriptInterface::luaPlayerGetClient);
registerMethod("Player", "getHouse", LuaScriptInterface::luaPlayerGetHouse);
registerMethod("Player", "sendHouseWindow", LuaScriptInterface::luaPlayerSendHouseWindow);
registerMethod("Player", "setEditHouse", LuaScriptInterface::luaPlayerSetEditHouse);
registerMethod("Player", "setGhostMode", LuaScriptInterface::luaPlayerSetGhostMode);
registerMethod("Player", "getContainerId", LuaScriptInterface::luaPlayerGetContainerId);
registerMethod("Player", "getContainerById", LuaScriptInterface::luaPlayerGetContainerById);
registerMethod("Player", "getContainerIndex", LuaScriptInterface::luaPlayerGetContainerIndex);
registerMethod("Player", "getInstantSpells", LuaScriptInterface::luaPlayerGetInstantSpells);
registerMethod("Player", "canCast", LuaScriptInterface::luaPlayerCanCast);
registerMethod("Player", "hasChaseMode", LuaScriptInterface::luaPlayerHasChaseMode);
registerMethod("Player", "hasSecureMode", LuaScriptInterface::luaPlayerHasSecureMode);
registerMethod("Player", "getFightMode", LuaScriptInterface::luaPlayerGetFightMode);
registerMethod("Player", "getStoreInbox", LuaScriptInterface::luaPlayerGetStoreInbox);
// Monster: userdata class derived from Creature, exposing spawn, AI target
// and friend-list management bindings.
registerClass("Monster", "Creature", LuaScriptInterface::luaMonsterCreate);
registerMetaMethod("Monster", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Monster", "isMonster", LuaScriptInterface::luaMonsterIsMonster);
registerMethod("Monster", "getType", LuaScriptInterface::luaMonsterGetType);
registerMethod("Monster", "rename", LuaScriptInterface::luaMonsterRename);
registerMethod("Monster", "getSpawnPosition", LuaScriptInterface::luaMonsterGetSpawnPosition);
registerMethod("Monster", "isInSpawnRange", LuaScriptInterface::luaMonsterIsInSpawnRange);
registerMethod("Monster", "isIdle", LuaScriptInterface::luaMonsterIsIdle);
registerMethod("Monster", "setIdle", LuaScriptInterface::luaMonsterSetIdle);
registerMethod("Monster", "isTarget", LuaScriptInterface::luaMonsterIsTarget);
registerMethod("Monster", "isOpponent", LuaScriptInterface::luaMonsterIsOpponent);
registerMethod("Monster", "isFriend", LuaScriptInterface::luaMonsterIsFriend);
registerMethod("Monster", "addFriend", LuaScriptInterface::luaMonsterAddFriend);
registerMethod("Monster", "removeFriend", LuaScriptInterface::luaMonsterRemoveFriend);
registerMethod("Monster", "getFriendList", LuaScriptInterface::luaMonsterGetFriendList);
registerMethod("Monster", "getFriendCount", LuaScriptInterface::luaMonsterGetFriendCount);
registerMethod("Monster", "addTarget", LuaScriptInterface::luaMonsterAddTarget);
registerMethod("Monster", "removeTarget", LuaScriptInterface::luaMonsterRemoveTarget);
registerMethod("Monster", "getTargetList", LuaScriptInterface::luaMonsterGetTargetList);
registerMethod("Monster", "getTargetCount", LuaScriptInterface::luaMonsterGetTargetCount);
registerMethod("Monster", "selectTarget", LuaScriptInterface::luaMonsterSelectTarget);
registerMethod("Monster", "searchTarget", LuaScriptInterface::luaMonsterSearchTarget);
// Npc: userdata class derived from Creature, exposing master position and
// speech-bubble bindings.
registerClass("Npc", "Creature", LuaScriptInterface::luaNpcCreate);
registerMetaMethod("Npc", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Npc", "isNpc", LuaScriptInterface::luaNpcIsNpc);
registerMethod("Npc", "setMasterPos", LuaScriptInterface::luaNpcSetMasterPos);
registerMethod("Npc", "getSpeechBubble", LuaScriptInterface::luaNpcGetSpeechBubble);
registerMethod("Npc", "setSpeechBubble", LuaScriptInterface::luaNpcSetSpeechBubble);
// Guild: standalone userdata class (no base class) for guild metadata,
// membership, ranks and message of the day.
registerClass("Guild", "", LuaScriptInterface::luaGuildCreate);
registerMetaMethod("Guild", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Guild", "getId", LuaScriptInterface::luaGuildGetId);
registerMethod("Guild", "getName", LuaScriptInterface::luaGuildGetName);
registerMethod("Guild", "getMembersOnline", LuaScriptInterface::luaGuildGetMembersOnline);
registerMethod("Guild", "addRank", LuaScriptInterface::luaGuildAddRank);
registerMethod("Guild", "getRankById", LuaScriptInterface::luaGuildGetRankById);
registerMethod("Guild", "getRankByLevel", LuaScriptInterface::luaGuildGetRankByLevel);
registerMethod("Guild", "getMotd", LuaScriptInterface::luaGuildGetMotd);
registerMethod("Guild", "setMotd", LuaScriptInterface::luaGuildSetMotd);
// Group: standalone userdata class exposing read-only access-group
// properties and flag queries.
registerClass("Group", "", LuaScriptInterface::luaGroupCreate);
registerMetaMethod("Group", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Group", "getId", LuaScriptInterface::luaGroupGetId);
registerMethod("Group", "getName", LuaScriptInterface::luaGroupGetName);
registerMethod("Group", "getFlags", LuaScriptInterface::luaGroupGetFlags);
registerMethod("Group", "getAccess", LuaScriptInterface::luaGroupGetAccess);
registerMethod("Group", "getMaxDepotItems", LuaScriptInterface::luaGroupGetMaxDepotItems);
registerMethod("Group", "getMaxVipEntries", LuaScriptInterface::luaGroupGetMaxVipEntries);
registerMethod("Group", "hasFlag", LuaScriptInterface::luaGroupHasFlag);
// Vocation: standalone userdata class exposing read-only vocation
// configuration (gains, requirements, promotion chain).
registerClass("Vocation", "", LuaScriptInterface::luaVocationCreate);
registerMetaMethod("Vocation", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Vocation", "getId", LuaScriptInterface::luaVocationGetId);
registerMethod("Vocation", "getClientId", LuaScriptInterface::luaVocationGetClientId);
registerMethod("Vocation", "getName", LuaScriptInterface::luaVocationGetName);
registerMethod("Vocation", "getDescription", LuaScriptInterface::luaVocationGetDescription);
registerMethod("Vocation", "getRequiredSkillTries", LuaScriptInterface::luaVocationGetRequiredSkillTries);
registerMethod("Vocation", "getRequiredManaSpent", LuaScriptInterface::luaVocationGetRequiredManaSpent);
registerMethod("Vocation", "getCapacityGain", LuaScriptInterface::luaVocationGetCapacityGain);
registerMethod("Vocation", "getHealthGain", LuaScriptInterface::luaVocationGetHealthGain);
registerMethod("Vocation", "getHealthGainTicks", LuaScriptInterface::luaVocationGetHealthGainTicks);
registerMethod("Vocation", "getHealthGainAmount", LuaScriptInterface::luaVocationGetHealthGainAmount);
registerMethod("Vocation", "getManaGain", LuaScriptInterface::luaVocationGetManaGain);
registerMethod("Vocation", "getManaGainTicks", LuaScriptInterface::luaVocationGetManaGainTicks);
registerMethod("Vocation", "getManaGainAmount", LuaScriptInterface::luaVocationGetManaGainAmount);
registerMethod("Vocation", "getMaxSoul", LuaScriptInterface::luaVocationGetMaxSoul);
registerMethod("Vocation", "getSoulGainTicks", LuaScriptInterface::luaVocationGetSoulGainTicks);
registerMethod("Vocation", "getAttackSpeed", LuaScriptInterface::luaVocationGetAttackSpeed);
registerMethod("Vocation", "getBaseSpeed", LuaScriptInterface::luaVocationGetBaseSpeed);
registerMethod("Vocation", "getDemotion", LuaScriptInterface::luaVocationGetDemotion);
registerMethod("Vocation", "getPromotion", LuaScriptInterface::luaVocationGetPromotion);
registerMethod("Vocation", "allowsPvp", LuaScriptInterface::luaVocationAllowsPvp);
// Town: standalone userdata class with read-only town properties.
registerClass("Town", "", LuaScriptInterface::luaTownCreate);
registerMetaMethod("Town", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Town", "getId", LuaScriptInterface::luaTownGetId);
registerMethod("Town", "getName", LuaScriptInterface::luaTownGetName);
registerMethod("Town", "getTemplePosition", LuaScriptInterface::luaTownGetTemplePosition);
// House: standalone userdata class covering ownership, access lists,
// contents (beds/doors/tiles/items) and persistence.
registerClass("House", "", LuaScriptInterface::luaHouseCreate);
registerMetaMethod("House", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("House", "getId", LuaScriptInterface::luaHouseGetId);
registerMethod("House", "getName", LuaScriptInterface::luaHouseGetName);
registerMethod("House", "getTown", LuaScriptInterface::luaHouseGetTown);
registerMethod("House", "getExitPosition", LuaScriptInterface::luaHouseGetExitPosition);
registerMethod("House", "getRent", LuaScriptInterface::luaHouseGetRent);
registerMethod("House", "getOwnerGuid", LuaScriptInterface::luaHouseGetOwnerGuid);
registerMethod("House", "setOwnerGuid", LuaScriptInterface::luaHouseSetOwnerGuid);
registerMethod("House", "startTrade", LuaScriptInterface::luaHouseStartTrade);
registerMethod("House", "getBeds", LuaScriptInterface::luaHouseGetBeds);
registerMethod("House", "getBedCount", LuaScriptInterface::luaHouseGetBedCount);
registerMethod("House", "getDoors", LuaScriptInterface::luaHouseGetDoors);
registerMethod("House", "getDoorCount", LuaScriptInterface::luaHouseGetDoorCount);
registerMethod("House", "getDoorIdByPosition", LuaScriptInterface::luaHouseGetDoorIdByPosition);
registerMethod("House", "getTiles", LuaScriptInterface::luaHouseGetTiles);
registerMethod("House", "getItems", LuaScriptInterface::luaHouseGetItems);
registerMethod("House", "getTileCount", LuaScriptInterface::luaHouseGetTileCount);
registerMethod("House", "canEditAccessList", LuaScriptInterface::luaHouseCanEditAccessList);
registerMethod("House", "getAccessList", LuaScriptInterface::luaHouseGetAccessList);
registerMethod("House", "setAccessList", LuaScriptInterface::luaHouseSetAccessList);
registerMethod("House", "kickPlayer", LuaScriptInterface::luaHouseKickPlayer);
registerMethod("House", "save", LuaScriptInterface::luaHouseSave);
// ItemType: standalone userdata class exposing read-only item-template
// queries (classification predicates, combat stats, transform/decay ids,
// display flags and requirements).
registerClass("ItemType", "", LuaScriptInterface::luaItemTypeCreate);
registerMetaMethod("ItemType", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("ItemType", "isCorpse", LuaScriptInterface::luaItemTypeIsCorpse);
registerMethod("ItemType", "isDoor", LuaScriptInterface::luaItemTypeIsDoor);
registerMethod("ItemType", "isContainer", LuaScriptInterface::luaItemTypeIsContainer);
registerMethod("ItemType", "isFluidContainer", LuaScriptInterface::luaItemTypeIsFluidContainer);
registerMethod("ItemType", "isMovable", LuaScriptInterface::luaItemTypeIsMovable);
registerMethod("ItemType", "isRune", LuaScriptInterface::luaItemTypeIsRune);
registerMethod("ItemType", "isStackable", LuaScriptInterface::luaItemTypeIsStackable);
registerMethod("ItemType", "isReadable", LuaScriptInterface::luaItemTypeIsReadable);
registerMethod("ItemType", "isWritable", LuaScriptInterface::luaItemTypeIsWritable);
registerMethod("ItemType", "isBlocking", LuaScriptInterface::luaItemTypeIsBlocking);
registerMethod("ItemType", "isGroundTile", LuaScriptInterface::luaItemTypeIsGroundTile);
registerMethod("ItemType", "isMagicField", LuaScriptInterface::luaItemTypeIsMagicField);
registerMethod("ItemType", "isUseable", LuaScriptInterface::luaItemTypeIsUseable);
registerMethod("ItemType", "isPickupable", LuaScriptInterface::luaItemTypeIsPickupable);
registerMethod("ItemType", "getType", LuaScriptInterface::luaItemTypeGetType);
registerMethod("ItemType", "getGroup", LuaScriptInterface::luaItemTypeGetGroup);
registerMethod("ItemType", "getId", LuaScriptInterface::luaItemTypeGetId);
registerMethod("ItemType", "getClientId", LuaScriptInterface::luaItemTypeGetClientId);
registerMethod("ItemType", "getName", LuaScriptInterface::luaItemTypeGetName);
registerMethod("ItemType", "getPluralName", LuaScriptInterface::luaItemTypeGetPluralName);
registerMethod("ItemType", "getArticle", LuaScriptInterface::luaItemTypeGetArticle);
registerMethod("ItemType", "getDescription", LuaScriptInterface::luaItemTypeGetDescription);
registerMethod("ItemType", "getSlotPosition", LuaScriptInterface::luaItemTypeGetSlotPosition);
registerMethod("ItemType", "getCharges", LuaScriptInterface::luaItemTypeGetCharges);
registerMethod("ItemType", "getFluidSource", LuaScriptInterface::luaItemTypeGetFluidSource);
registerMethod("ItemType", "getCapacity", LuaScriptInterface::luaItemTypeGetCapacity);
registerMethod("ItemType", "getWeight", LuaScriptInterface::luaItemTypeGetWeight);
registerMethod("ItemType", "getHitChance", LuaScriptInterface::luaItemTypeGetHitChance);
registerMethod("ItemType", "getShootRange", LuaScriptInterface::luaItemTypeGetShootRange);
registerMethod("ItemType", "getAttack", LuaScriptInterface::luaItemTypeGetAttack);
registerMethod("ItemType", "getAttackSpeed", LuaScriptInterface::luaItemTypeGetAttackSpeed);
registerMethod("ItemType", "getDefense", LuaScriptInterface::luaItemTypeGetDefense);
registerMethod("ItemType", "getExtraDefense", LuaScriptInterface::luaItemTypeGetExtraDefense);
registerMethod("ItemType", "getArmor", LuaScriptInterface::luaItemTypeGetArmor);
registerMethod("ItemType", "getWeaponType", LuaScriptInterface::luaItemTypeGetWeaponType);
registerMethod("ItemType", "getElementType", LuaScriptInterface::luaItemTypeGetElementType);
registerMethod("ItemType", "getElementDamage", LuaScriptInterface::luaItemTypeGetElementDamage);
registerMethod("ItemType", "getTransformEquipId", LuaScriptInterface::luaItemTypeGetTransformEquipId);
registerMethod("ItemType", "getTransformDeEquipId", LuaScriptInterface::luaItemTypeGetTransformDeEquipId);
registerMethod("ItemType", "getDestroyId", LuaScriptInterface::luaItemTypeGetDestroyId);
registerMethod("ItemType", "getDecayId", LuaScriptInterface::luaItemTypeGetDecayId);
registerMethod("ItemType", "getRequiredLevel", LuaScriptInterface::luaItemTypeGetRequiredLevel);
registerMethod("ItemType", "getAmmoType", LuaScriptInterface::luaItemTypeGetAmmoType);
registerMethod("ItemType", "getCorpseType", LuaScriptInterface::luaItemTypeGetCorpseType);
registerMethod("ItemType", "getAbilities", LuaScriptInterface::luaItemTypeGetAbilities);
registerMethod("ItemType", "hasShowAttributes", LuaScriptInterface::luaItemTypeHasShowAttributes);
registerMethod("ItemType", "hasShowCount", LuaScriptInterface::luaItemTypeHasShowCount);
registerMethod("ItemType", "hasShowCharges", LuaScriptInterface::luaItemTypeHasShowCharges);
registerMethod("ItemType", "hasShowDuration", LuaScriptInterface::luaItemTypeHasShowDuration);
registerMethod("ItemType", "hasAllowDistRead", LuaScriptInterface::luaItemTypeHasAllowDistRead);
registerMethod("ItemType", "getWieldInfo", LuaScriptInterface::luaItemTypeGetWieldInfo);
registerMethod("ItemType", "getDuration", LuaScriptInterface::luaItemTypeGetDuration);
registerMethod("ItemType", "getLevelDoor", LuaScriptInterface::luaItemTypeGetLevelDoor);
registerMethod("ItemType", "getVocationString", LuaScriptInterface::luaItemTypeGetVocationString);
registerMethod("ItemType", "getMinReqLevel", LuaScriptInterface::luaItemTypeGetMinReqLevel);
registerMethod("ItemType", "getMinReqMagicLevel", LuaScriptInterface::luaItemTypeGetMinReqMagicLevel);
registerMethod("ItemType", "hasSubType", LuaScriptInterface::luaItemTypeHasSubType);
registerMethod("ItemType", "isStoreItem", LuaScriptInterface::luaItemTypeIsStoreItem);
// Combat: script-created userdata; both __gc and an explicit delete() are
// bound to the same destructor so scripts can free it deterministically
// before garbage collection runs.
registerClass("Combat", "", LuaScriptInterface::luaCombatCreate);
registerMetaMethod("Combat", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("Combat", "__gc", LuaScriptInterface::luaCombatDelete);
registerMethod("Combat", "delete", LuaScriptInterface::luaCombatDelete);
registerMethod("Combat", "setParameter", LuaScriptInterface::luaCombatSetParameter);
registerMethod("Combat", "getParameter", LuaScriptInterface::luaCombatGetParameter);
registerMethod("Combat", "setFormula", LuaScriptInterface::luaCombatSetFormula);
registerMethod("Combat", "setArea", LuaScriptInterface::luaCombatSetArea);
registerMethod("Combat", "addCondition", LuaScriptInterface::luaCombatAddCondition);
registerMethod("Combat", "clearConditions", LuaScriptInterface::luaCombatClearConditions);
registerMethod("Combat", "setCallback", LuaScriptInterface::luaCombatSetCallback);
registerMethod("Combat", "setOrigin", LuaScriptInterface::luaCombatSetOrigin);
registerMethod("Combat", "execute", LuaScriptInterface::luaCombatExecute);
// Condition: script-created userdata describing a status effect that can be
// attached to a Combat or a Creature.
registerClass("Condition", "", LuaScriptInterface::luaConditionCreate);
registerMetaMethod("Condition", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("Condition", "__gc", LuaScriptInterface::luaConditionDelete);
// Consistency fix: Combat, Loot and MonsterSpell all pair their __gc
// metamethod with an explicit delete() method; Condition was missing it,
// leaving scripts no way to free a Condition userdata deterministically.
registerMethod("Condition", "delete", LuaScriptInterface::luaConditionDelete);
registerMethod("Condition", "getId", LuaScriptInterface::luaConditionGetId);
registerMethod("Condition", "getSubId", LuaScriptInterface::luaConditionGetSubId);
registerMethod("Condition", "getType", LuaScriptInterface::luaConditionGetType);
registerMethod("Condition", "getIcons", LuaScriptInterface::luaConditionGetIcons);
registerMethod("Condition", "getEndTime", LuaScriptInterface::luaConditionGetEndTime);
registerMethod("Condition", "clone", LuaScriptInterface::luaConditionClone);
registerMethod("Condition", "getTicks", LuaScriptInterface::luaConditionGetTicks);
registerMethod("Condition", "setTicks", LuaScriptInterface::luaConditionSetTicks);
registerMethod("Condition", "setParameter", LuaScriptInterface::luaConditionSetParameter);
registerMethod("Condition", "getParameter", LuaScriptInterface::luaConditionGetParameter);
registerMethod("Condition", "setFormula", LuaScriptInterface::luaConditionSetFormula);
registerMethod("Condition", "setOutfit", LuaScriptInterface::luaConditionSetOutfit);
registerMethod("Condition", "addDamage", LuaScriptInterface::luaConditionAddDamage);
// Outfit: minimal userdata class; equality uses a dedicated comparator
// (luaOutfitCompare) rather than the generic userdata compare.
registerClass("Outfit", "", LuaScriptInterface::luaOutfitCreate);
registerMetaMethod("Outfit", "__eq", LuaScriptInterface::luaOutfitCompare);
// MonsterType: userdata class over a monster template; most bindings act as
// combined getter/setters. The onThink/onAppear/onDisappear/onMove/onSay
// names all route to luaMonsterTypeEventOnCallback, which presumably
// dispatches on the invoked method name — confirm against the callback's
// implementation.
registerClass("MonsterType", "", LuaScriptInterface::luaMonsterTypeCreate);
registerMetaMethod("MonsterType", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("MonsterType", "isAttackable", LuaScriptInterface::luaMonsterTypeIsAttackable);
registerMethod("MonsterType", "isChallengeable", LuaScriptInterface::luaMonsterTypeIsChallengeable);
registerMethod("MonsterType", "isConvinceable", LuaScriptInterface::luaMonsterTypeIsConvinceable);
registerMethod("MonsterType", "isSummonable", LuaScriptInterface::luaMonsterTypeIsSummonable);
registerMethod("MonsterType", "isIgnoringSpawnBlock", LuaScriptInterface::luaMonsterTypeIsIgnoringSpawnBlock);
registerMethod("MonsterType", "isIllusionable", LuaScriptInterface::luaMonsterTypeIsIllusionable);
registerMethod("MonsterType", "isHostile", LuaScriptInterface::luaMonsterTypeIsHostile);
registerMethod("MonsterType", "isPushable", LuaScriptInterface::luaMonsterTypeIsPushable);
registerMethod("MonsterType", "isHealthHidden", LuaScriptInterface::luaMonsterTypeIsHealthHidden);
registerMethod("MonsterType", "isBoss", LuaScriptInterface::luaMonsterTypeIsBoss);
registerMethod("MonsterType", "canPushItems", LuaScriptInterface::luaMonsterTypeCanPushItems);
registerMethod("MonsterType", "canPushCreatures", LuaScriptInterface::luaMonsterTypeCanPushCreatures);
registerMethod("MonsterType", "canWalkOnEnergy", LuaScriptInterface::luaMonsterTypeCanWalkOnEnergy);
registerMethod("MonsterType", "canWalkOnFire", LuaScriptInterface::luaMonsterTypeCanWalkOnFire);
registerMethod("MonsterType", "canWalkOnPoison", LuaScriptInterface::luaMonsterTypeCanWalkOnPoison);
registerMethod("MonsterType", "name", LuaScriptInterface::luaMonsterTypeName);
registerMethod("MonsterType", "nameDescription", LuaScriptInterface::luaMonsterTypeNameDescription);
registerMethod("MonsterType", "health", LuaScriptInterface::luaMonsterTypeHealth);
registerMethod("MonsterType", "maxHealth", LuaScriptInterface::luaMonsterTypeMaxHealth);
registerMethod("MonsterType", "runHealth", LuaScriptInterface::luaMonsterTypeRunHealth);
registerMethod("MonsterType", "experience", LuaScriptInterface::luaMonsterTypeExperience);
registerMethod("MonsterType", "skull", LuaScriptInterface::luaMonsterTypeSkull);
registerMethod("MonsterType", "combatImmunities", LuaScriptInterface::luaMonsterTypeCombatImmunities);
registerMethod("MonsterType", "conditionImmunities", LuaScriptInterface::luaMonsterTypeConditionImmunities);
registerMethod("MonsterType", "getAttackList", LuaScriptInterface::luaMonsterTypeGetAttackList);
registerMethod("MonsterType", "addAttack", LuaScriptInterface::luaMonsterTypeAddAttack);
registerMethod("MonsterType", "getDefenseList", LuaScriptInterface::luaMonsterTypeGetDefenseList);
registerMethod("MonsterType", "addDefense", LuaScriptInterface::luaMonsterTypeAddDefense);
registerMethod("MonsterType", "getElementList", LuaScriptInterface::luaMonsterTypeGetElementList);
registerMethod("MonsterType", "addElement", LuaScriptInterface::luaMonsterTypeAddElement);
registerMethod("MonsterType", "getVoices", LuaScriptInterface::luaMonsterTypeGetVoices);
registerMethod("MonsterType", "addVoice", LuaScriptInterface::luaMonsterTypeAddVoice);
registerMethod("MonsterType", "getLoot", LuaScriptInterface::luaMonsterTypeGetLoot);
registerMethod("MonsterType", "addLoot", LuaScriptInterface::luaMonsterTypeAddLoot);
registerMethod("MonsterType", "getCreatureEvents", LuaScriptInterface::luaMonsterTypeGetCreatureEvents);
registerMethod("MonsterType", "registerEvent", LuaScriptInterface::luaMonsterTypeRegisterEvent);
registerMethod("MonsterType", "eventType", LuaScriptInterface::luaMonsterTypeEventType);
registerMethod("MonsterType", "onThink", LuaScriptInterface::luaMonsterTypeEventOnCallback);
registerMethod("MonsterType", "onAppear", LuaScriptInterface::luaMonsterTypeEventOnCallback);
registerMethod("MonsterType", "onDisappear", LuaScriptInterface::luaMonsterTypeEventOnCallback);
registerMethod("MonsterType", "onMove", LuaScriptInterface::luaMonsterTypeEventOnCallback);
registerMethod("MonsterType", "onSay", LuaScriptInterface::luaMonsterTypeEventOnCallback);
registerMethod("MonsterType", "getSummonList", LuaScriptInterface::luaMonsterTypeGetSummonList);
registerMethod("MonsterType", "addSummon", LuaScriptInterface::luaMonsterTypeAddSummon);
registerMethod("MonsterType", "maxSummons", LuaScriptInterface::luaMonsterTypeMaxSummons);
registerMethod("MonsterType", "armor", LuaScriptInterface::luaMonsterTypeArmor);
registerMethod("MonsterType", "defense", LuaScriptInterface::luaMonsterTypeDefense);
registerMethod("MonsterType", "outfit", LuaScriptInterface::luaMonsterTypeOutfit);
registerMethod("MonsterType", "race", LuaScriptInterface::luaMonsterTypeRace);
registerMethod("MonsterType", "corpseId", LuaScriptInterface::luaMonsterTypeCorpseId);
registerMethod("MonsterType", "manaCost", LuaScriptInterface::luaMonsterTypeManaCost);
registerMethod("MonsterType", "baseSpeed", LuaScriptInterface::luaMonsterTypeBaseSpeed);
registerMethod("MonsterType", "light", LuaScriptInterface::luaMonsterTypeLight);
registerMethod("MonsterType", "staticAttackChance", LuaScriptInterface::luaMonsterTypeStaticAttackChance);
registerMethod("MonsterType", "targetDistance", LuaScriptInterface::luaMonsterTypeTargetDistance);
registerMethod("MonsterType", "yellChance", LuaScriptInterface::luaMonsterTypeYellChance);
registerMethod("MonsterType", "yellSpeedTicks", LuaScriptInterface::luaMonsterTypeYellSpeedTicks);
registerMethod("MonsterType", "changeTargetChance", LuaScriptInterface::luaMonsterTypeChangeTargetChance);
registerMethod("MonsterType", "changeTargetSpeed", LuaScriptInterface::luaMonsterTypeChangeTargetSpeed);
// Loot
registerClass("Loot", "", LuaScriptInterface::luaCreateLoot);
registerMetaMethod("Loot", "__gc", LuaScriptInterface::luaDeleteLoot);
registerMethod("Loot", "delete", LuaScriptInterface::luaDeleteLoot);
registerMethod("Loot", "setId", LuaScriptInterface::luaLootSetId);
registerMethod("Loot", "setMaxCount", LuaScriptInterface::luaLootSetMaxCount);
registerMethod("Loot", "setSubType", LuaScriptInterface::luaLootSetSubType);
registerMethod("Loot", "setChance", LuaScriptInterface::luaLootSetChance);
registerMethod("Loot", "setActionId", LuaScriptInterface::luaLootSetActionId);
registerMethod("Loot", "setDescription", LuaScriptInterface::luaLootSetDescription);
registerMethod("Loot", "addChildLoot", LuaScriptInterface::luaLootAddChildLoot);
// MonsterSpell
registerClass("MonsterSpell", "", LuaScriptInterface::luaCreateMonsterSpell);
registerMetaMethod("MonsterSpell", "__gc", LuaScriptInterface::luaDeleteMonsterSpell);
registerMethod("MonsterSpell", "delete", LuaScriptInterface::luaDeleteMonsterSpell);
registerMethod("MonsterSpell", "setType", LuaScriptInterface::luaMonsterSpellSetType);
registerMethod("MonsterSpell", "setScriptName", LuaScriptInterface::luaMonsterSpellSetScriptName);
registerMethod("MonsterSpell", "setChance", LuaScriptInterface::luaMonsterSpellSetChance);
registerMethod("MonsterSpell", "setInterval", LuaScriptInterface::luaMonsterSpellSetInterval);
registerMethod("MonsterSpell", "setRange", LuaScriptInterface::luaMonsterSpellSetRange);
registerMethod("MonsterSpell", "setCombatValue", LuaScriptInterface::luaMonsterSpellSetCombatValue);
registerMethod("MonsterSpell", "setCombatType", LuaScriptInterface::luaMonsterSpellSetCombatType);
registerMethod("MonsterSpell", "setAttackValue", LuaScriptInterface::luaMonsterSpellSetAttackValue);
registerMethod("MonsterSpell", "setNeedTarget", LuaScriptInterface::luaMonsterSpellSetNeedTarget);
registerMethod("MonsterSpell", "setNeedDirection", LuaScriptInterface::luaMonsterSpellSetNeedDirection);
registerMethod("MonsterSpell", "setCombatLength", LuaScriptInterface::luaMonsterSpellSetCombatLength);
registerMethod("MonsterSpell", "setCombatSpread", LuaScriptInterface::luaMonsterSpellSetCombatSpread);
registerMethod("MonsterSpell", "setCombatRadius", LuaScriptInterface::luaMonsterSpellSetCombatRadius);
registerMethod("MonsterSpell", "setConditionType", LuaScriptInterface::luaMonsterSpellSetConditionType);
registerMethod("MonsterSpell", "setConditionDamage", LuaScriptInterface::luaMonsterSpellSetConditionDamage);
registerMethod("MonsterSpell", "setConditionSpeedChange", LuaScriptInterface::luaMonsterSpellSetConditionSpeedChange);
registerMethod("MonsterSpell", "setConditionDuration", LuaScriptInterface::luaMonsterSpellSetConditionDuration);
registerMethod("MonsterSpell", "setConditionDrunkenness", LuaScriptInterface::luaMonsterSpellSetConditionDrunkenness);
registerMethod("MonsterSpell", "setConditionTickInterval", LuaScriptInterface::luaMonsterSpellSetConditionTickInterval);
registerMethod("MonsterSpell", "setCombatShootEffect", LuaScriptInterface::luaMonsterSpellSetCombatShootEffect);
registerMethod("MonsterSpell", "setCombatEffect", LuaScriptInterface::luaMonsterSpellSetCombatEffect);
// Party
registerClass("Party", "", LuaScriptInterface::luaPartyCreate);
registerMetaMethod("Party", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Party", "disband", LuaScriptInterface::luaPartyDisband);
registerMethod("Party", "getLeader", LuaScriptInterface::luaPartyGetLeader);
registerMethod("Party", "setLeader", LuaScriptInterface::luaPartySetLeader);
registerMethod("Party", "getMembers", LuaScriptInterface::luaPartyGetMembers);
registerMethod("Party", "getMemberCount", LuaScriptInterface::luaPartyGetMemberCount);
registerMethod("Party", "getInvitees", LuaScriptInterface::luaPartyGetInvitees);
registerMethod("Party", "getInviteeCount", LuaScriptInterface::luaPartyGetInviteeCount);
registerMethod("Party", "addInvite", LuaScriptInterface::luaPartyAddInvite);
registerMethod("Party", "removeInvite", LuaScriptInterface::luaPartyRemoveInvite);
registerMethod("Party", "addMember", LuaScriptInterface::luaPartyAddMember);
registerMethod("Party", "removeMember", LuaScriptInterface::luaPartyRemoveMember);
registerMethod("Party", "isSharedExperienceActive", LuaScriptInterface::luaPartyIsSharedExperienceActive);
registerMethod("Party", "isSharedExperienceEnabled", LuaScriptInterface::luaPartyIsSharedExperienceEnabled);
registerMethod("Party", "shareExperience", LuaScriptInterface::luaPartyShareExperience);
registerMethod("Party", "setSharedExperience", LuaScriptInterface::luaPartySetSharedExperience);
// Spells
registerClass("Spell", "", LuaScriptInterface::luaSpellCreate);
registerMetaMethod("Spell", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Spell", "onCastSpell", LuaScriptInterface::luaSpellOnCastSpell);
registerMethod("Spell", "register", LuaScriptInterface::luaSpellRegister);
registerMethod("Spell", "name", LuaScriptInterface::luaSpellName);
registerMethod("Spell", "id", LuaScriptInterface::luaSpellId);
registerMethod("Spell", "group", LuaScriptInterface::luaSpellGroup);
registerMethod("Spell", "cooldown", LuaScriptInterface::luaSpellCooldown);
registerMethod("Spell", "groupCooldown", LuaScriptInterface::luaSpellGroupCooldown);
registerMethod("Spell", "level", LuaScriptInterface::luaSpellLevel);
registerMethod("Spell", "magicLevel", LuaScriptInterface::luaSpellMagicLevel);
registerMethod("Spell", "mana", LuaScriptInterface::luaSpellMana);
registerMethod("Spell", "manaPercent", LuaScriptInterface::luaSpellManaPercent);
registerMethod("Spell", "soul", LuaScriptInterface::luaSpellSoul);
registerMethod("Spell", "range", LuaScriptInterface::luaSpellRange);
registerMethod("Spell", "isPremium", LuaScriptInterface::luaSpellPremium);
registerMethod("Spell", "isEnabled", LuaScriptInterface::luaSpellEnabled);
registerMethod("Spell", "needTarget", LuaScriptInterface::luaSpellNeedTarget);
registerMethod("Spell", "needWeapon", LuaScriptInterface::luaSpellNeedWeapon);
registerMethod("Spell", "needLearn", LuaScriptInterface::luaSpellNeedLearn);
registerMethod("Spell", "isSelfTarget", LuaScriptInterface::luaSpellSelfTarget);
registerMethod("Spell", "isBlocking", LuaScriptInterface::luaSpellBlocking);
registerMethod("Spell", "isAggressive", LuaScriptInterface::luaSpellAggressive);
registerMethod("Spell", "isPzLock", LuaScriptInterface::luaSpellPzLock);
registerMethod("Spell", "vocation", LuaScriptInterface::luaSpellVocation);
// only for InstantSpell
registerMethod("Spell", "words", LuaScriptInterface::luaSpellWords);
registerMethod("Spell", "needDirection", LuaScriptInterface::luaSpellNeedDirection);
registerMethod("Spell", "hasParams", LuaScriptInterface::luaSpellHasParams);
registerMethod("Spell", "hasPlayerNameParam", LuaScriptInterface::luaSpellHasPlayerNameParam);
registerMethod("Spell", "needCasterTargetOrDirection", LuaScriptInterface::luaSpellNeedCasterTargetOrDirection);
registerMethod("Spell", "isBlockingWalls", LuaScriptInterface::luaSpellIsBlockingWalls);
// only for RuneSpells
registerMethod("Spell", "runeLevel", LuaScriptInterface::luaSpellRuneLevel);
registerMethod("Spell", "runeMagicLevel", LuaScriptInterface::luaSpellRuneMagicLevel);
registerMethod("Spell", "runeId", LuaScriptInterface::luaSpellRuneId);
registerMethod("Spell", "charges", LuaScriptInterface::luaSpellCharges);
registerMethod("Spell", "allowFarUse", LuaScriptInterface::luaSpellAllowFarUse);
registerMethod("Spell", "blockWalls", LuaScriptInterface::luaSpellBlockWalls);
registerMethod("Spell", "checkFloor", LuaScriptInterface::luaSpellCheckFloor);
// Action
registerClass("Action", "", LuaScriptInterface::luaCreateAction);
registerMethod("Action", "onUse", LuaScriptInterface::luaActionOnUse);
registerMethod("Action", "register", LuaScriptInterface::luaActionRegister);
registerMethod("Action", "id", LuaScriptInterface::luaActionItemId);
registerMethod("Action", "aid", LuaScriptInterface::luaActionActionId);
registerMethod("Action", "uid", LuaScriptInterface::luaActionUniqueId);
registerMethod("Action", "allowFarUse", LuaScriptInterface::luaActionAllowFarUse);
registerMethod("Action", "blockWalls", LuaScriptInterface::luaActionBlockWalls);
registerMethod("Action", "checkFloor", LuaScriptInterface::luaActionCheckFloor);
// TalkAction
registerClass("TalkAction", "", LuaScriptInterface::luaCreateTalkaction);
registerMethod("TalkAction", "onSay", LuaScriptInterface::luaTalkactionOnSay);
registerMethod("TalkAction", "register", LuaScriptInterface::luaTalkactionRegister);
registerMethod("TalkAction", "separator", LuaScriptInterface::luaTalkactionSeparator);
registerMethod("TalkAction", "access", LuaScriptInterface::luaTalkactionAccess);
registerMethod("TalkAction", "accountType", LuaScriptInterface::luaTalkactionAccountType);
// CreatureEvent
registerClass("CreatureEvent", "", LuaScriptInterface::luaCreateCreatureEvent);
registerMethod("CreatureEvent", "type", LuaScriptInterface::luaCreatureEventType);
registerMethod("CreatureEvent", "register", LuaScriptInterface::luaCreatureEventRegister);
registerMethod("CreatureEvent", "onLogin", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onLogout", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onThink", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onPrepareDeath", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onDeath", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onKill", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onAdvance", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onModalWindow", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onTextEdit", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onHealthChange", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onManaChange", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onExtendedOpcode", LuaScriptInterface::luaCreatureEventOnCallback);
// MoveEvent
registerClass("MoveEvent", "", LuaScriptInterface::luaCreateMoveEvent);
registerMethod("MoveEvent", "type", LuaScriptInterface::luaMoveEventType);
registerMethod("MoveEvent", "register", LuaScriptInterface::luaMoveEventRegister);
registerMethod("MoveEvent", "level", LuaScriptInterface::luaMoveEventLevel);
registerMethod("MoveEvent", "magicLevel", LuaScriptInterface::luaMoveEventMagLevel);
registerMethod("MoveEvent", "slot", LuaScriptInterface::luaMoveEventSlot);
registerMethod("MoveEvent", "id", LuaScriptInterface::luaMoveEventItemId);
registerMethod("MoveEvent", "aid", LuaScriptInterface::luaMoveEventActionId);
registerMethod("MoveEvent", "uid", LuaScriptInterface::luaMoveEventUniqueId);
registerMethod("MoveEvent", "position", LuaScriptInterface::luaMoveEventPosition);
registerMethod("MoveEvent", "premium", LuaScriptInterface::luaMoveEventPremium);
registerMethod("MoveEvent", "vocation", LuaScriptInterface::luaMoveEventVocation);
registerMethod("MoveEvent", "tileItem", LuaScriptInterface::luaMoveEventTileItem);
registerMethod("MoveEvent", "onEquip", LuaScriptInterface::luaMoveEventOnCallback);
registerMethod("MoveEvent", "onDeEquip", LuaScriptInterface::luaMoveEventOnCallback);
registerMethod("MoveEvent", "onStepIn", LuaScriptInterface::luaMoveEventOnCallback);
registerMethod("MoveEvent", "onStepOut", LuaScriptInterface::luaMoveEventOnCallback);
registerMethod("MoveEvent", "onAddItem", LuaScriptInterface::luaMoveEventOnCallback);
registerMethod("MoveEvent", "onRemoveItem", LuaScriptInterface::luaMoveEventOnCallback);
// GlobalEvent
registerClass("GlobalEvent", "", LuaScriptInterface::luaCreateGlobalEvent);
registerMethod("GlobalEvent", "type", LuaScriptInterface::luaGlobalEventType);
registerMethod("GlobalEvent", "register", LuaScriptInterface::luaGlobalEventRegister);
registerMethod("GlobalEvent", "time", LuaScriptInterface::luaGlobalEventTime);
registerMethod("GlobalEvent", "interval", LuaScriptInterface::luaGlobalEventInterval);
registerMethod("GlobalEvent", "onThink", LuaScriptInterface::luaGlobalEventOnCallback);
registerMethod("GlobalEvent", "onTime", LuaScriptInterface::luaGlobalEventOnCallback);
registerMethod("GlobalEvent", "onStartup", LuaScriptInterface::luaGlobalEventOnCallback);
registerMethod("GlobalEvent", "onShutdown", LuaScriptInterface::luaGlobalEventOnCallback);
registerMethod("GlobalEvent", "onRecord", LuaScriptInterface::luaGlobalEventOnCallback);
// Weapon
registerClass("Weapon", "", LuaScriptInterface::luaCreateWeapon);
registerMethod("Weapon", "action", LuaScriptInterface::luaWeaponAction);
registerMethod("Weapon", "register", LuaScriptInterface::luaWeaponRegister);
registerMethod("Weapon", "id", LuaScriptInterface::luaWeaponId);
registerMethod("Weapon", "level", LuaScriptInterface::luaWeaponLevel);
registerMethod("Weapon", "magicLevel", LuaScriptInterface::luaWeaponMagicLevel);
registerMethod("Weapon", "mana", LuaScriptInterface::luaWeaponMana);
registerMethod("Weapon", "manaPercent", LuaScriptInterface::luaWeaponManaPercent);
registerMethod("Weapon", "health", LuaScriptInterface::luaWeaponHealth);
registerMethod("Weapon", "healthPercent", LuaScriptInterface::luaWeaponHealthPercent);
registerMethod("Weapon", "soul", LuaScriptInterface::luaWeaponSoul);
registerMethod("Weapon", "breakChance", LuaScriptInterface::luaWeaponBreakChance);
registerMethod("Weapon", "premium", LuaScriptInterface::luaWeaponPremium);
registerMethod("Weapon", "wieldUnproperly", LuaScriptInterface::luaWeaponUnproperly);
registerMethod("Weapon", "vocation", LuaScriptInterface::luaWeaponVocation);
registerMethod("Weapon", "onUseWeapon", LuaScriptInterface::luaWeaponOnUseWeapon);
registerMethod("Weapon", "element", LuaScriptInterface::luaWeaponElement);
registerMethod("Weapon", "attack", LuaScriptInterface::luaWeaponAttack);
registerMethod("Weapon", "defense", LuaScriptInterface::luaWeaponDefense);
registerMethod("Weapon", "range", LuaScriptInterface::luaWeaponRange);
registerMethod("Weapon", "charges", LuaScriptInterface::luaWeaponCharges);
registerMethod("Weapon", "duration", LuaScriptInterface::luaWeaponDuration);
registerMethod("Weapon", "decayTo", LuaScriptInterface::luaWeaponDecayTo);
registerMethod("Weapon", "transformEquipTo", LuaScriptInterface::luaWeaponTransformEquipTo);
registerMethod("Weapon", "transformDeEquipTo", LuaScriptInterface::luaWeaponTransformDeEquipTo);
registerMethod("Weapon", "slotType", LuaScriptInterface::luaWeaponSlotType);
registerMethod("Weapon", "hitChance", LuaScriptInterface::luaWeaponHitChance);
registerMethod("Weapon", "extraElement", LuaScriptInterface::luaWeaponExtraElement);
// exclusively for distance weapons
registerMethod("Weapon", "ammoType", LuaScriptInterface::luaWeaponAmmoType);
registerMethod("Weapon", "maxHitChance", LuaScriptInterface::luaWeaponMaxHitChance);
// exclusively for wands
registerMethod("Weapon", "damage", LuaScriptInterface::luaWeaponWandDamage);
// exclusively for wands & distance weapons
registerMethod("Weapon", "shootType", LuaScriptInterface::luaWeaponShootType);
}
#undef registerEnum
#undef registerEnumIn
// Registers a Lua "class" named className: a global table holding the class's
// methods, plus a registry metatable (luaL_newmetatable) applied to userdata of
// this type. If baseClass is non-empty, the class inherits its methods through
// __index chaining. If newFunction is given, it becomes the constructor invoked
// by calling ClassName(...) (installed as __call).
// The metatable carries bookkeeping under integer keys:
//   'h' = hash of the class name, 'p' = inheritance depth (parent count),
//   't' = LuaDataType tag used elsewhere (e.g. luaAddEvent) to identify userdata.
void LuaScriptInterface::registerClass(const std::string& className, const std::string& baseClass, lua_CFunction newFunction/* = nullptr*/)
{
	// className = {}
	lua_newtable(luaState);
	// duplicate the table so a copy remains on the stack after lua_setglobal
	lua_pushvalue(luaState, -1);
	lua_setglobal(luaState, className.c_str());
	int methods = lua_gettop(luaState);
	// methodsTable = {}  (becomes the metatable of the class table itself)
	lua_newtable(luaState);
	int methodsTable = lua_gettop(luaState);
	if (newFunction) {
		// className.__call = newFunction  -> ClassName(...) constructs an instance
		lua_pushcfunction(luaState, newFunction);
		lua_setfield(luaState, methodsTable, "__call");
	}
	uint32_t parents = 0;
	if (!baseClass.empty()) {
		// look up the base class table, read its parent count from key 'p',
		// and chain method lookups to it via __index
		lua_getglobal(luaState, baseClass.c_str());
		lua_rawgeti(luaState, -1, 'p');
		parents = getNumber<uint32_t>(luaState, -1) + 1;
		lua_pop(luaState, 1);
		lua_setfield(luaState, methodsTable, "__index");
	}
	// setmetatable(className, methodsTable)
	lua_setmetatable(luaState, methods);
	// className.metatable = {}  (registry metatable for userdata of this class)
	luaL_newmetatable(luaState, className.c_str());
	int metatable = lua_gettop(luaState);
	// className.metatable.__metatable = className  (shields the real metatable)
	lua_pushvalue(luaState, methods);
	lua_setfield(luaState, metatable, "__metatable");
	// className.metatable.__index = className  (userdata method dispatch)
	lua_pushvalue(luaState, methods);
	lua_setfield(luaState, metatable, "__index");
	// className.metatable['h'] = hash
	lua_pushnumber(luaState, std::hash<std::string>()(className));
	lua_rawseti(luaState, metatable, 'h');
	// className.metatable['p'] = parents
	lua_pushnumber(luaState, parents);
	lua_rawseti(luaState, metatable, 'p');
	// className.metatable['t'] = type  (LuaDataType tag for known userdata kinds)
	if (className == "Item") {
		lua_pushnumber(luaState, LuaData_Item);
	} else if (className == "Container") {
		lua_pushnumber(luaState, LuaData_Container);
	} else if (className == "Teleport") {
		lua_pushnumber(luaState, LuaData_Teleport);
	} else if (className == "Player") {
		lua_pushnumber(luaState, LuaData_Player);
	} else if (className == "Monster") {
		lua_pushnumber(luaState, LuaData_Monster);
	} else if (className == "Npc") {
		lua_pushnumber(luaState, LuaData_Npc);
	} else if (className == "Tile") {
		lua_pushnumber(luaState, LuaData_Tile);
	} else {
		lua_pushnumber(luaState, LuaData_Unknown);
	}
	lua_rawseti(luaState, metatable, 't');
	// pop className, className.metatable
	lua_pop(luaState, 2);
}
// Creates an empty table and exposes it as the global tableName.
// Equivalent to executing: _G[tableName] = {}
void LuaScriptInterface::registerTable(const std::string& tableName)
{
	lua_newtable(luaState);
	lua_setglobal(luaState, tableName.c_str());
}
// Installs func as globalName.methodName on an already-registered global table.
void LuaScriptInterface::registerMethod(const std::string& globalName, const std::string& methodName, lua_CFunction func)
{
	lua_getglobal(luaState, globalName.c_str());
	lua_pushcfunction(luaState, func);
	lua_setfield(luaState, -2, methodName.c_str());
	// drop the table again so the stack stays balanced
	lua_pop(luaState, 1);
}
// Installs func on the class's registry metatable (e.g. __eq, __gc handlers).
void LuaScriptInterface::registerMetaMethod(const std::string& className, const std::string& methodName, lua_CFunction func)
{
	luaL_getmetatable(luaState, className.c_str());
	lua_pushcfunction(luaState, func);
	lua_setfield(luaState, -2, methodName.c_str());
	// drop the metatable again so the stack stays balanced
	lua_pop(luaState, 1);
}
// Exposes func directly as a global Lua function: _G[functionName] = func.
void LuaScriptInterface::registerGlobalMethod(const std::string& functionName, lua_CFunction func)
{
	lua_pushcfunction(luaState, func);
	lua_setglobal(luaState, functionName.c_str());
}
// Sets tableName.name = value on an already-registered global table.
void LuaScriptInterface::registerVariable(const std::string& tableName, const std::string& name, lua_Number value)
{
	lua_getglobal(luaState, tableName.c_str());
	setField(luaState, name.c_str(), value);
	// drop the table again so the stack stays balanced
	lua_pop(luaState, 1);
}
// Exposes a numeric constant to scripts: _G[name] = value.
void LuaScriptInterface::registerGlobalVariable(const std::string& name, lua_Number value)
{
	lua_pushnumber(luaState, value);
	lua_setglobal(luaState, name.c_str());
}
// Exposes a boolean constant to scripts: _G[name] = value.
void LuaScriptInterface::registerGlobalBoolean(const std::string& name, bool value)
{
	pushBoolean(luaState, value);
	lua_setglobal(luaState, name.c_str());
}
int LuaScriptInterface::luaDoPlayerAddItem(lua_State* L)
{
	//doPlayerAddItem(cid, itemid, <optional: default: 1> count/subtype, <optional: default: 1> canDropOnMap)
	//doPlayerAddItem(cid, itemid, <optional: default: 1> count, <optional: default: 1> canDropOnMap, <optional: default: 1>subtype)
	// Creates the requested items and hands them to the player (falling back to
	// the map when allowed). Pushes the uid of the last created item on success,
	// false on any failure.
	Player* player = getPlayer(L, 1);
	if (!player) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	uint16_t itemId = getNumber<uint16_t>(L, 2);
	int32_t count = getNumber<int32_t>(L, 3, 1);
	bool canDropOnMap = getBoolean(L, 4, true);
	uint16_t subType = getNumber<uint16_t>(L, 5, 1);
	const ItemType& it = Item::items[itemId];
	// number of separate Item objects to create
	int32_t itemCount;
	auto parameters = lua_gettop(L);
	if (parameters > 4) {
		//subtype already supplied, count then is the amount
		itemCount = std::max<int32_t>(1, count);
	} else if (it.hasSubType()) {
		if (it.stackable) {
			// stackable: arg 3 is the total amount; split into stacks of <= 100
			itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
		} else {
			itemCount = 1;
		}
		// in this calling form, arg 3 doubles as the subtype (charges/fluid)
		subType = count;
	} else {
		itemCount = std::max<int32_t>(1, count);
	}
	while (itemCount > 0) {
		// a stackable stack holds at most 100 units
		uint16_t stackCount = subType;
		if (it.stackable && stackCount > 100) {
			stackCount = 100;
		}
		Item* newItem = Item::CreateItem(itemId, stackCount);
		if (!newItem) {
			reportErrorFunc(L, getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
			pushBoolean(L, false);
			return 1;
		}
		if (it.stackable) {
			// remaining amount to distribute into subsequent stacks
			subType -= stackCount;
		}
		ReturnValue ret = g_game.internalPlayerAddItem(player, newItem, canDropOnMap);
		if (ret != RETURNVALUE_NOERROR) {
			// the game did not take ownership; free the item to avoid a leak
			delete newItem;
			pushBoolean(L, false);
			return 1;
		}
		if (--itemCount == 0) {
			if (newItem->getParent()) {
				uint32_t uid = getScriptEnv()->addThing(newItem);
				lua_pushnumber(L, uid);
				return 1;
			} else {
				//stackable item stacked with existing object, newItem will be released
				pushBoolean(L, false);
				return 1;
			}
		}
	}
	pushBoolean(L, false);
	return 1;
}
int LuaScriptInterface::luaDebugPrint(lua_State* L)
{
	//debugPrint(text)
	// Routes the given text through the script error reporter for visibility.
	const std::string text = getString(L, -1);
	reportErrorFunc(L, text);
	return 0;
}
int LuaScriptInterface::luaGetWorldTime(lua_State* L)
{
	//getWorldTime()
	// Pushes the current in-game world time.
	lua_pushnumber(L, g_game.getWorldTime());
	return 1;
}
int LuaScriptInterface::luaGetWorldLight(lua_State* L)
{
	//getWorldLight()
	// Pushes the current world light level and color as two return values.
	const auto light = g_game.getWorldLightInfo();
	lua_pushnumber(L, light.level);
	lua_pushnumber(L, light.color);
	return 2;
}
int LuaScriptInterface::luaSetWorldLight(lua_State* L)
{
	//setWorldLight(level, color)
	// Scripts may not override the light when the config forces the default.
	if (g_config.getBoolean(ConfigManager::DEFAULT_WORLD_LIGHT)) {
		pushBoolean(L, false);
		return 1;
	}

	LightInfo info;
	info.level = getNumber<uint8_t>(L, 1);
	info.color = getNumber<uint8_t>(L, 2);
	g_game.setWorldLightInfo(info);

	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaGetWorldUpTime(lua_State* L)
{
	//getWorldUpTime()
	// Server uptime in whole seconds since ProtocolStatus::start (ms timestamps).
	lua_pushnumber(L, (OTSYS_TIME() - ProtocolStatus::start) / 1000);
	return 1;
}
int LuaScriptInterface::luaGetSubTypeName(lua_State* L)
{
	// getSubTypeName(subType)
	// Pushes the item-type name for a positive subtype id, nil otherwise.
	const int32_t subType = getNumber<int32_t>(L, 1);
	if (subType <= 0) {
		lua_pushnil(L);
		return 1;
	}

	pushString(L, Item::items[subType].name);
	return 1;
}
// Reads a two-dimensional Lua table (a table of row-tables of numbers) from the
// top of the stack into the flat vector vec, row by row; rows receives the row
// count. Returns false when a row is not a table or a cell is not a number.
// NOTE(review): the early `return false` paths exit mid-traversal and leave the
// lua_next key/value pair on the stack; callers bail out with an error, but
// confirm overall stack balance is acceptable there.
bool LuaScriptInterface::getArea(lua_State* L, std::vector<uint32_t>& vec, uint32_t& rows)
{
	// first key for the outer lua_next traversal
	lua_pushnil(L);
	for (rows = 0; lua_next(L, -2) != 0; ++rows) {
		if (!isTable(L, -1)) {
			return false;
		}
		// first key for the inner (per-row) traversal
		lua_pushnil(L);
		while (lua_next(L, -2) != 0) {
			if (!isNumber(L, -1)) {
				return false;
			}
			vec.push_back(getNumber<uint32_t>(L, -1));
			// pop the cell value, keep the key for the next lua_next call
			lua_pop(L, 1);
		}
		// pop the row table
		lua_pop(L, 1);
	}
	// pop the table that was being iterated
	lua_pop(L, 1);
	return (rows != 0);
}
int LuaScriptInterface::luaCreateCombatArea(lua_State* L)
{
	//createCombatArea( {area}, <optional> {extArea} )
	// Builds a global AreaCombat object from the given table(s) and pushes its id.
	// Only callable while the script file is being loaded.
	ScriptEnvironment* env = getScriptEnv();
	if (env->getScriptId() != EVENT_ID_LOADING) {
		reportErrorFunc(L, "This function can only be used while loading the script.");
		pushBoolean(L, false);
		return 1;
	}
	uint32_t areaId = g_luaEnvironment.createAreaObject(env->getScriptInterface());
	AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
	int parameters = lua_gettop(L);
	if (parameters >= 2) {
		// optional second table: extended area variant — presumably used when the
		// caster has a direction; confirm against AreaCombat::setupExtArea
		uint32_t rowsExtArea;
		std::vector<uint32_t> vecExtArea;
		if (!isTable(L, 2) || !getArea(L, vecExtArea, rowsExtArea)) {
			reportErrorFunc(L, "Invalid extended area table.");
			pushBoolean(L, false);
			return 1;
		}
		area->setupExtArea(vecExtArea, rowsExtArea);
	}
	uint32_t rowsArea = 0;
	std::vector<uint32_t> vecArea;
	if (!isTable(L, 1) || !getArea(L, vecArea, rowsArea)) {
		reportErrorFunc(L, "Invalid area table.");
		pushBoolean(L, false);
		return 1;
	}
	area->setupArea(vecArea, rowsArea);
	lua_pushnumber(L, areaId);
	return 1;
}
int LuaScriptInterface::luaDoAreaCombat(lua_State* L)
{
	//doAreaCombat(cid, type, pos, area, min, max, effect[, origin = ORIGIN_SPELL[, blockArmor = false[, blockShield = false[, ignoreResistances = false]]]])
	// Applies area damage/healing of the given combat type around pos.
	// cid may be 0, meaning "no attacking creature".
	Creature* creature = getCreature(L, 1);
	if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}

	// areaId 0 is accepted and means "no area object" (single-tile combat)
	uint32_t areaId = getNumber<uint32_t>(L, 4);
	const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
	if (area || areaId == 0) {
		CombatType_t combatType = getNumber<CombatType_t>(L, 2);

		CombatParams params;
		params.combatType = combatType;
		params.impactEffect = getNumber<uint8_t>(L, 7);
		// BUGFIX: per the signature above, origin is argument 8 and the optional
		// booleans follow at 9/10/11 (matching luaDoTargetCombat's layout);
		// previously these were read from 8/9/10, clashing with origin.
		params.blockedByArmor = getBoolean(L, 9, false);
		params.blockedByShield = getBoolean(L, 10, false);
		params.ignoreResistances = getBoolean(L, 11, false);

		CombatDamage damage;
		damage.origin = getNumber<CombatOrigin>(L, 8, ORIGIN_SPELL);
		damage.primary.type = combatType;
		// min (arg 5) / max (arg 6); normal_random swaps its bounds if needed
		damage.primary.value = normal_random(getNumber<int32_t>(L, 5), getNumber<int32_t>(L, 6));

		Combat::doAreaCombat(creature, getPosition(L, 3), area, damage, params);
		pushBoolean(L, true);
	} else {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
		pushBoolean(L, false);
	}
	return 1;
}
int LuaScriptInterface::luaDoTargetCombat(lua_State* L)
{
	//doTargetCombat(cid, target, type, min, max, effect[, origin = ORIGIN_SPELL[, blockArmor = false[, blockShield = false[, ignoreResistances = false]]]])
	// Applies direct damage/healing of the given combat type to target.
	// cid may be 0, meaning "no attacking creature". Pushes true on success.
	Creature* creature = getCreature(L, 1);
	if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	Creature* target = getCreature(L, 2);
	if (!target) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	CombatType_t combatType = getNumber<CombatType_t>(L, 3);
	CombatParams params;
	params.combatType = combatType;
	params.impactEffect = getNumber<uint8_t>(L, 6);
	// optional booleans follow origin (arg 7) at positions 8/9/10
	params.blockedByArmor = getBoolean(L, 8, false);
	params.blockedByShield = getBoolean(L, 9, false);
	params.ignoreResistances = getBoolean(L, 10, false);
	CombatDamage damage;
	damage.origin = getNumber<CombatOrigin>(L, 7, ORIGIN_SPELL);
	damage.primary.type = combatType;
	// random value between min (arg 4) and max (arg 5)
	damage.primary.value = normal_random(getNumber<int32_t>(L, 4), getNumber<int32_t>(L, 5));
	Combat::doTargetCombat(creature, target, damage, params);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaDoChallengeCreature(lua_State* L)
{
	//doChallengeCreature(cid, target[, force = false])
	// Makes target turn its attention to the challenging creature.
	Creature* challenger = getCreature(L, 1);
	if (!challenger) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}

	Creature* target = getCreature(L, 2);
	if (!target) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}

	target->challengeCreature(challenger, getBoolean(L, 3, false));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaIsValidUID(lua_State* L)
{
	//isValidUID(uid)
	// True when the uid maps to a live Thing in the current script environment.
	const uint32_t uid = getNumber<uint32_t>(L, -1);
	pushBoolean(L, getScriptEnv()->getThingByUID(uid) != nullptr);
	return 1;
}
int LuaScriptInterface::luaIsDepot(lua_State* L)
{
	//isDepot(uid)
	// True when the uid resolves to a container that is a depot locker.
	Container* container = getScriptEnv()->getContainerByUID(getNumber<uint32_t>(L, -1));
	pushBoolean(L, container != nullptr && container->getDepotLocker() != nullptr);
	return 1;
}
int LuaScriptInterface::luaIsMoveable(lua_State* L)
{
	//isMoveable(uid)
	//isMovable(uid)
	// True when the uid resolves to a pushable Thing.
	Thing* thing = getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1));
	pushBoolean(L, thing != nullptr && thing->isPushable());
	return 1;
}
int LuaScriptInterface::luaDoAddContainerItem(lua_State* L)
{
	//doAddContainerItem(uid, itemid, <optional> count/subtype)
	// Creates the requested items inside the container identified by uid.
	// Pushes the uid of the last created item on success, false otherwise.
	uint32_t uid = getNumber<uint32_t>(L, 1);
	ScriptEnvironment* env = getScriptEnv();
	Container* container = env->getContainerByUID(uid);
	if (!container) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CONTAINER_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	uint16_t itemId = getNumber<uint16_t>(L, 2);
	const ItemType& it = Item::items[itemId];
	// number of separate Item objects to create, and per-item subtype/amount
	int32_t itemCount = 1;
	int32_t subType = 1;
	uint32_t count = getNumber<uint32_t>(L, 3, 1);
	if (it.hasSubType()) {
		if (it.stackable) {
			// stackable: count is the total amount; split into stacks of <= 100
			itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
		}
		// arg 3 doubles as the subtype (charges/fluid) for subtyped items
		subType = count;
	} else {
		itemCount = std::max<int32_t>(1, count);
	}
	while (itemCount > 0) {
		// a stackable stack holds at most 100 units
		int32_t stackCount = std::min<int32_t>(100, subType);
		Item* newItem = Item::CreateItem(itemId, stackCount);
		if (!newItem) {
			reportErrorFunc(L, getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
			pushBoolean(L, false);
			return 1;
		}
		if (it.stackable) {
			// remaining amount to distribute into subsequent stacks
			subType -= stackCount;
		}
		ReturnValue ret = g_game.internalAddItem(container, newItem);
		if (ret != RETURNVALUE_NOERROR) {
			// the game did not take ownership; free the item to avoid a leak
			delete newItem;
			pushBoolean(L, false);
			return 1;
		}
		if (--itemCount == 0) {
			if (newItem->getParent()) {
				lua_pushnumber(L, env->addThing(newItem));
			} else {
				//stackable item stacked with existing object, newItem will be released
				pushBoolean(L, false);
			}
			return 1;
		}
	}
	pushBoolean(L, false);
	return 1;
}
int LuaScriptInterface::luaGetDepotId(lua_State* L)
{
	//getDepotId(uid)
	// Pushes the depot id of the depot locker identified by uid, or false.
	Container* container = getScriptEnv()->getContainerByUID(getNumber<uint32_t>(L, -1));
	if (!container) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CONTAINER_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}

	DepotLocker* locker = container->getDepotLocker();
	if (!locker) {
		reportErrorFunc(L, "Depot not found");
		pushBoolean(L, false);
		return 1;
	}

	lua_pushnumber(L, locker->getDepotId());
	return 1;
}
int LuaScriptInterface::luaAddEvent(lua_State* L)
{
	//addEvent(callback, delay, ...)
	// Schedules callback to run after delay ms (clamped to a 100 ms minimum),
	// forwarding any extra arguments. Pushes an event id usable with stopEvent().
	int parameters = lua_gettop(L);
	if (parameters < 2) {
		reportErrorFunc(L, fmt::format("Not enough parameters: {:d}.", parameters));
		pushBoolean(L, false);
		return 1;
	}
	if (!isFunction(L, 1)) {
		reportErrorFunc(L, "callback parameter should be a function.");
		pushBoolean(L, false);
		return 1;
	}
	if (!isNumber(L, 2)) {
		reportErrorFunc(L, "delay parameter should be a number.");
		pushBoolean(L, false);
		return 1;
	}
	if (g_config.getBoolean(ConfigManager::WARN_UNSAFE_SCRIPTS) || g_config.getBoolean(ConfigManager::CONVERT_UNSAFE_SCRIPTS)) {
		// Userdata arguments are unsafe: the underlying object may be destroyed
		// before the timer fires. Collect the stack indexes of such arguments.
		std::vector<std::pair<int32_t, LuaDataType>> indexes;
		for (int i = 3; i <= parameters; ++i) {
			if (lua_getmetatable(L, i) == 0) {
				continue;
			}
			// metatable['t'] holds the LuaDataType tag set by registerClass
			lua_rawgeti(L, -1, 't');
			LuaDataType type = getNumber<LuaDataType>(L, -1);
			if (type != LuaData_Unknown && type != LuaData_Tile) {
				indexes.push_back({i, type});
			}
			// pop the tag and the metatable
			lua_pop(L, 2);
		}
		if (!indexes.empty()) {
			if (g_config.getBoolean(ConfigManager::WARN_UNSAFE_SCRIPTS)) {
				// Build a message like "Arguments #3, #4 and #5 are unsafe"
				bool plural = indexes.size() > 1;
				std::string warningString = "Argument";
				if (plural) {
					warningString += 's';
				}
				for (const auto& entry : indexes) {
					if (entry == indexes.front()) {
						warningString += ' ';
					} else if (entry == indexes.back()) {
						warningString += " and ";
					} else {
						warningString += ", ";
					}
					warningString += '#';
					warningString += std::to_string(entry.first);
				}
				if (plural) {
					warningString += " are unsafe";
				} else {
					warningString += " is unsafe";
				}
				reportErrorFunc(L, warningString);
			}
			if (g_config.getBoolean(ConfigManager::CONVERT_UNSAFE_SCRIPTS)) {
				// Replace each userdata argument with its stable numeric id so the
				// callback can re-resolve the object safely when it finally runs.
				for (const auto& entry : indexes) {
					switch (entry.second) {
						case LuaData_Item:
						case LuaData_Container:
						case LuaData_Teleport: {
							lua_getglobal(L, "Item");
							lua_getfield(L, -1, "getUniqueId");
							break;
						}
						case LuaData_Player:
						case LuaData_Monster:
						case LuaData_Npc: {
							lua_getglobal(L, "Creature");
							lua_getfield(L, -1, "getId");
							break;
						}
						default:
							break;
					}
					// remove the class table, call method(userdata), store the result
					// back into the argument slot
					lua_replace(L, -2);
					lua_pushvalue(L, entry.first);
					lua_call(L, 1, 1);
					lua_replace(L, entry.first);
				}
			}
		}
	}
	LuaTimerEventDesc eventDesc;
	eventDesc.parameters.reserve(parameters - 2); // safe to use -2 since we guaranteed there are at least two parameters
	// pop the extra arguments (topmost first) into registry references
	for (int i = 0; i < parameters - 2; ++i) {
		eventDesc.parameters.push_back(luaL_ref(L, LUA_REGISTRYINDEX));
	}
	uint32_t delay = std::max<uint32_t>(100, getNumber<uint32_t>(L, 2));
	// pop the delay, leaving the callback on top; then ref the callback
	lua_pop(L, 1);
	eventDesc.function = luaL_ref(L, LUA_REGISTRYINDEX);
	eventDesc.scriptId = getScriptEnv()->getScriptId();
	auto& lastTimerEventId = g_luaEnvironment.lastEventTimerId;
	eventDesc.eventId = g_scheduler.addEvent(createSchedulerTask(
		delay, std::bind(&LuaEnvironment::executeTimerEvent, &g_luaEnvironment, lastTimerEventId)
	));
	g_luaEnvironment.timerEvents.emplace(lastTimerEventId, std::move(eventDesc));
	// push the id handed to the script, then advance the global counter
	lua_pushnumber(L, lastTimerEventId++);
	return 1;
}
int LuaScriptInterface::luaStopEvent(lua_State* L)
{
	//stopEvent(eventid)
	// Cancels a pending timer created by addEvent. Returns false when the id
	// is unknown (already fired or never existed), true on success.
	uint32_t eventId = getNumber<uint32_t>(L, 1);
	auto& timerEvents = g_luaEnvironment.timerEvents;
	auto it = timerEvents.find(eventId);
	if (it == timerEvents.end()) {
		pushBoolean(L, false);
		return 1;
	}
	// Take ownership of the descriptor before erasing the map entry so the
	// registry references below remain valid while we release them.
	LuaTimerEventDesc timerEventDesc = std::move(it->second);
	timerEvents.erase(it);
	g_scheduler.stopEvent(timerEventDesc.eventId);
	// Release the Lua registry references held for the callback and its
	// arguments; otherwise they would leak in the registry.
	luaL_unref(L, LUA_REGISTRYINDEX, timerEventDesc.function);
	for (auto parameter : timerEventDesc.parameters) {
		luaL_unref(L, LUA_REGISTRYINDEX, parameter);
	}
	pushBoolean(L, true);
	return 1;
}
// saveServer(): forces an immediate save of the game state; always returns true.
int LuaScriptInterface::luaSaveServer(lua_State* L)
{
	g_game.saveGameState();
	pushBoolean(L, true);
	return 1;
}
// cleanMap(): removes loose items from the map and returns how many were removed.
int LuaScriptInterface::luaCleanMap(lua_State* L)
{
	lua_pushnumber(L, g_game.map.clean());
	return 1;
}
int LuaScriptInterface::luaIsInWar(lua_State* L)
{
	//isInWar(cid, target)
	// Returns true when both players exist and player->isInWar(target) holds;
	// reports a script error and returns false when either player is missing.
	Player* player = getPlayer(L, 1);
	if (!player) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	Player* targetPlayer = getPlayer(L, 2);
	if (!targetPlayer) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, player->isInWar(targetPlayer));
	return 1;
}
int LuaScriptInterface::luaGetWaypointPositionByName(lua_State* L)
{
	//getWaypointPositionByName(name)
	// Looks up a named map waypoint; pushes its position, or false if absent.
	auto& waypoints = g_game.map.waypoints;
	auto it = waypoints.find(getString(L, -1));
	if (it != waypoints.end()) {
		pushPosition(L, it->second);
	} else {
		pushBoolean(L, false);
	}
	return 1;
}
int LuaScriptInterface::luaSendChannelMessage(lua_State* L)
{
	//sendChannelMessage(channelId, type, message)
	// Broadcasts `message` with speak-class `type` to every user of the chat
	// channel; returns false when the channel id does not exist.
	uint32_t channelId = getNumber<uint32_t>(L, 1);
	ChatChannel* channel = g_chat->getChannelById(channelId);
	if (!channel) {
		pushBoolean(L, false);
		return 1;
	}
	SpeakClasses type = getNumber<SpeakClasses>(L, 2);
	std::string message = getString(L, 3);
	channel->sendToAll(message, type);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaSendGuildChannelMessage(lua_State* L)
{
	//sendGuildChannelMessage(guildId, type, message)
	// Same as sendChannelMessage but the channel is resolved via the guild id.
	uint32_t guildId = getNumber<uint32_t>(L, 1);
	ChatChannel* channel = g_chat->getGuildChannelById(guildId);
	if (!channel) {
		pushBoolean(L, false);
		return 1;
	}
	SpeakClasses type = getNumber<SpeakClasses>(L, 2);
	std::string message = getString(L, 3);
	channel->sendToAll(message, type);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaIsScriptsInterface(lua_State* L)
{
	//isScriptsInterface()
	// True only while the caller is running inside the data/scripts interface;
	// otherwise reports an error and returns false.
	if (getScriptEnv()->getScriptInterface() == &g_scripts->getScriptInterface()) {
		pushBoolean(L, true);
	} else {
		reportErrorFunc(L, "EventCallback: can only be called inside (data/scripts/)");
		pushBoolean(L, false);
	}
	return 1;
}
std::string LuaScriptInterface::escapeString(const std::string& string)
{
	// Escapes backslashes, both quote styles and long-bracket openers so the
	// result can be embedded safely inside generated Lua source. The backslash
	// replacement must run first so it does not re-escape the others.
	static const char* const patterns[] = {"\\", "\"", "'", "[["};
	static const char* const substitutes[] = {"\\\\", "\\\"", "\\'", "\\[["};
	std::string escaped = string;
	for (size_t i = 0; i < 4; ++i) {
		replaceString(escaped, patterns[i], substitutes[i]);
	}
	return escaped;
}
#ifndef LUAJIT_VERSION
// Fallback `bit` library for plain-Lua builds; LuaJIT ships its own and this
// whole section is compiled out in that case.
const luaL_Reg LuaScriptInterface::luaBitReg[] = {
	//{"tobit", LuaScriptInterface::luaBitToBit},
	{"bnot", LuaScriptInterface::luaBitNot},
	{"band", LuaScriptInterface::luaBitAnd},
	{"bor", LuaScriptInterface::luaBitOr},
	{"bxor", LuaScriptInterface::luaBitXor},
	{"lshift", LuaScriptInterface::luaBitLeftShift},
	{"rshift", LuaScriptInterface::luaBitRightShift},
	//{"arshift", LuaScriptInterface::luaBitArithmeticalRightShift},
	//{"rol", LuaScriptInterface::luaBitRotateLeft},
	//{"ror", LuaScriptInterface::luaBitRotateRight},
	//{"bswap", LuaScriptInterface::luaBitSwapEndian},
	//{"tohex", LuaScriptInterface::luaBitToHex},
	{nullptr, nullptr}
};
// bit.bnot(v): bitwise complement of the value truncated to 32 bits.
int LuaScriptInterface::luaBitNot(lua_State* L)
{
	lua_pushnumber(L, ~getNumber<uint32_t>(L, -1));
	return 1;
}
// Generates variadic bit ops (band/bor/bxor): folds every argument into the
// last one with the given compound-assignment operator.
#define MULTIOP(name, op) \
int LuaScriptInterface::luaBit##name(lua_State* L) \
{ \
	int n = lua_gettop(L); \
	uint32_t w = getNumber<uint32_t>(L, -1); \
	for (int i = 1; i < n; ++i) \
		w op getNumber<uint32_t>(L, i); \
	lua_pushnumber(L, w); \
	return 1; \
}
MULTIOP(And, &= )
MULTIOP(Or, |= )
MULTIOP(Xor, ^= )
// Generates two-argument shift ops (lshift/rshift) on 32-bit values.
#define SHIFTOP(name, op) \
int LuaScriptInterface::luaBit##name(lua_State* L) \
{ \
	uint32_t n1 = getNumber<uint32_t>(L, 1), n2 = getNumber<uint32_t>(L, 2); \
	lua_pushnumber(L, (n1 op n2)); \
	return 1; \
}
SHIFTOP(LeftShift, << )
SHIFTOP(RightShift, >> )
#endif
// Lua bindings for the global `configManager` table; each getter takes one of
// the ConfigManager enum keys and returns the corresponding config value.
const luaL_Reg LuaScriptInterface::luaConfigManagerTable[] = {
	{"getString", LuaScriptInterface::luaConfigManagerGetString},
	{"getNumber", LuaScriptInterface::luaConfigManagerGetNumber},
	{"getBoolean", LuaScriptInterface::luaConfigManagerGetBoolean},
	{nullptr, nullptr}
};
int LuaScriptInterface::luaConfigManagerGetString(lua_State* L)
{
	pushString(L, g_config.getString(getNumber<ConfigManager::string_config_t>(L, -1)));
	return 1;
}
int LuaScriptInterface::luaConfigManagerGetNumber(lua_State* L)
{
	lua_pushnumber(L, g_config.getNumber(getNumber<ConfigManager::integer_config_t>(L, -1)));
	return 1;
}
int LuaScriptInterface::luaConfigManagerGetBoolean(lua_State* L)
{
	pushBoolean(L, g_config.getBoolean(getNumber<ConfigManager::boolean_config_t>(L, -1)));
	return 1;
}
// Lua bindings for the global `db` table.
const luaL_Reg LuaScriptInterface::luaDatabaseTable[] = {
	{"query", LuaScriptInterface::luaDatabaseExecute},
	{"asyncQuery", LuaScriptInterface::luaDatabaseAsyncExecute},
	{"storeQuery", LuaScriptInterface::luaDatabaseStoreQuery},
	{"asyncStoreQuery", LuaScriptInterface::luaDatabaseAsyncStoreQuery},
	{"escapeString", LuaScriptInterface::luaDatabaseEscapeString},
	{"escapeBlob", LuaScriptInterface::luaDatabaseEscapeBlob},
	{"lastInsertId", LuaScriptInterface::luaDatabaseLastInsertId},
	{"tableExists", LuaScriptInterface::luaDatabaseTableExists},
	{nullptr, nullptr}
};
// db.query(sql): executes the statement synchronously; returns success flag.
int LuaScriptInterface::luaDatabaseExecute(lua_State* L)
{
	pushBoolean(L, Database::getInstance().executeQuery(getString(L, -1)));
	return 1;
}
// db.asyncQuery(sql[, callback]): queues the statement on the database worker
// thread. If a callback function is given it is anchored in the Lua registry
// and later invoked with a single boolean success argument.
int LuaScriptInterface::luaDatabaseAsyncExecute(lua_State* L)
{
	std::function<void(DBResult_ptr, bool)> callback;
	if (lua_gettop(L) > 1) {
		// Anchor the callback (top of stack) so it survives until the task runs.
		int32_t ref = luaL_ref(L, LUA_REGISTRYINDEX);
		auto scriptId = getScriptEnv()->getScriptId();
		callback = [ref, scriptId](DBResult_ptr, bool success) {
			lua_State* luaState = g_luaEnvironment.getLuaState();
			if (!luaState) {
				return;
			}
			if (!LuaScriptInterface::reserveScriptEnv()) {
				// No free script environment: still release the registry ref
				// so the callback does not leak.
				luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
				return;
			}
			lua_rawgeti(luaState, LUA_REGISTRYINDEX, ref);
			pushBoolean(luaState, success);
			auto env = getScriptEnv();
			env->setScriptId(scriptId, &g_luaEnvironment);
			g_luaEnvironment.callFunction(1);
			luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
		};
	}
	g_databaseTasks.addTask(getString(L, -1), callback);
	return 0;
}
// db.storeQuery(sql): executes synchronously; returns a result id usable with
// the `result` bindings, or false when the query yields no rows / fails.
int LuaScriptInterface::luaDatabaseStoreQuery(lua_State* L)
{
	if (DBResult_ptr res = Database::getInstance().storeQuery(getString(L, -1))) {
		lua_pushnumber(L, ScriptEnvironment::addResult(res));
	} else {
		pushBoolean(L, false);
	}
	return 1;
}
// db.asyncStoreQuery(sql[, callback]): like asyncQuery but the callback
// receives a result id (or false when the query yielded no rows).
int LuaScriptInterface::luaDatabaseAsyncStoreQuery(lua_State* L)
{
	std::function<void(DBResult_ptr, bool)> callback;
	if (lua_gettop(L) > 1) {
		// Anchor the callback (top of stack) so it survives until the task runs.
		int32_t ref = luaL_ref(L, LUA_REGISTRYINDEX);
		auto scriptId = getScriptEnv()->getScriptId();
		callback = [ref, scriptId](DBResult_ptr result, bool) {
			lua_State* luaState = g_luaEnvironment.getLuaState();
			if (!luaState) {
				return;
			}
			if (!LuaScriptInterface::reserveScriptEnv()) {
				// Release the registry ref even on failure to avoid a leak.
				luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
				return;
			}
			lua_rawgeti(luaState, LUA_REGISTRYINDEX, ref);
			if (result) {
				lua_pushnumber(luaState, ScriptEnvironment::addResult(result));
			} else {
				pushBoolean(luaState, false);
			}
			auto env = getScriptEnv();
			env->setScriptId(scriptId, &g_luaEnvironment);
			g_luaEnvironment.callFunction(1);
			luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
		};
	}
	g_databaseTasks.addTask(getString(L, -1), callback, true);
	return 0;
}
// db.escapeString(s): returns the string escaped and quoted for SQL use.
int LuaScriptInterface::luaDatabaseEscapeString(lua_State* L)
{
	pushString(L, Database::getInstance().escapeString(getString(L, -1)));
	return 1;
}
// db.escapeBlob(s, length): escapes raw binary data of the given length.
int LuaScriptInterface::luaDatabaseEscapeBlob(lua_State* L)
{
	uint32_t length = getNumber<uint32_t>(L, 2);
	pushString(L, Database::getInstance().escapeBlob(getString(L, 1).c_str(), length));
	return 1;
}
// db.lastInsertId(): auto-increment id of the most recent INSERT.
int LuaScriptInterface::luaDatabaseLastInsertId(lua_State* L)
{
	lua_pushnumber(L, Database::getInstance().getLastInsertId());
	return 1;
}
// db.tableExists(name): whether the table exists in the current schema.
int LuaScriptInterface::luaDatabaseTableExists(lua_State* L)
{
	pushBoolean(L, DatabaseManager::tableExists(getString(L, -1)));
	return 1;
}
// Lua bindings for the global `result` table; all take a result id returned
// by db.storeQuery / db.asyncStoreQuery.
const luaL_Reg LuaScriptInterface::luaResultTable[] = {
	{"getNumber", LuaScriptInterface::luaResultGetNumber},
	{"getString", LuaScriptInterface::luaResultGetString},
	{"getStream", LuaScriptInterface::luaResultGetStream},
	{"next", LuaScriptInterface::luaResultNext},
	{"free", LuaScriptInterface::luaResultFree},
	{nullptr, nullptr}
};
// result.getNumber(resultId, column): numeric column value, or false when the
// result id is unknown.
int LuaScriptInterface::luaResultGetNumber(lua_State* L)
{
	DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
	if (!res) {
		pushBoolean(L, false);
		return 1;
	}
	const std::string& s = getString(L, 2);
	lua_pushnumber(L, res->getNumber<int64_t>(s));
	return 1;
}
// result.getString(resultId, column): string column value, or false when the
// result id is unknown.
int LuaScriptInterface::luaResultGetString(lua_State* L)
{
	DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
	if (!res) {
		pushBoolean(L, false);
		return 1;
	}
	const std::string& s = getString(L, 2);
	pushString(L, res->getString(s));
	return 1;
}
// result.getStream(resultId, column): raw blob bytes of the column plus their
// length, or false when the result id is unknown or the column has no data.
int LuaScriptInterface::luaResultGetStream(lua_State* L)
{
	DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
	if (!res) {
		pushBoolean(L, false);
		return 1;
	}
	unsigned long length;
	const char* stream = res->getStream(getString(L, 2), length);
	if (!stream) {
		// Missing column / NULL blob: calling lua_pushlstring on a null
		// pointer (with `length` possibly uninitialized) is undefined
		// behavior, so fail gracefully like the other result getters.
		pushBoolean(L, false);
		return 1;
	}
	lua_pushlstring(L, stream, length);
	lua_pushnumber(L, length);
	return 2;
}
// result.next(resultId): advances to the next row; false when exhausted or the
// result id is unknown.
int LuaScriptInterface::luaResultNext(lua_State* L)
{
	DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, -1));
	if (!res) {
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, res->next());
	return 1;
}
// result.free(resultId): releases the stored result; false if id was unknown.
int LuaScriptInterface::luaResultFree(lua_State* L)
{
	pushBoolean(L, ScriptEnvironment::removeResult(getNumber<uint32_t>(L, -1)));
	return 1;
}
// Userdata
// __eq metamethod shared by all userdata: compares the wrapped pointers.
int LuaScriptInterface::luaUserdataCompare(lua_State* L)
{
	// userdataA == userdataB
	pushBoolean(L, getUserdata<void>(L, 1) == getUserdata<void>(L, 2));
	return 1;
}
// _G
// isType(derived, base): checks class-hierarchy membership using the 'p'
// (parent depth) and 'h' (type hash) fields stored in each registered
// metatable, walking __index links until the depths match and then comparing
// hashes. The raw stack indexing here is deliberate and order-sensitive;
// NOTE(review): do not reorder these lua_rawgeti/lua_getfield calls.
int LuaScriptInterface::luaIsType(lua_State* L)
{
	// isType(derived, base)
	lua_getmetatable(L, -2);
	lua_getmetatable(L, -2);
	lua_rawgeti(L, -2, 'p');
	uint_fast8_t parentsB = getNumber<uint_fast8_t>(L, 1);
	lua_rawgeti(L, -3, 'h');
	size_t hashB = getNumber<size_t>(L, 1);
	lua_rawgeti(L, -3, 'p');
	uint_fast8_t parentsA = getNumber<uint_fast8_t>(L, 1);
	// Climb the derived metatable's __index chain until both sides sit at the
	// same depth in the hierarchy.
	for (uint_fast8_t i = parentsA; i < parentsB; ++i) {
		lua_getfield(L, -3, "__index");
		lua_replace(L, -4);
	}
	lua_rawgeti(L, -4, 'h');
	size_t hashA = getNumber<size_t>(L, 1);
	pushBoolean(L, hashA == hashB);
	return 1;
}
// rawgetmetatable(name): fetches a registered metatable from the registry.
int LuaScriptInterface::luaRawGetMetatable(lua_State* L)
{
	// rawgetmetatable(metatableName)
	luaL_getmetatable(L, getString(L, 1).c_str());
	return 1;
}
// os
// os.mtime(): current time in milliseconds.
int LuaScriptInterface::luaSystemTime(lua_State* L)
{
	// os.mtime()
	lua_pushnumber(L, OTSYS_TIME());
	return 1;
}
// table
// table.create(arrayLength, keyLength): pre-sized table constructor.
int LuaScriptInterface::luaTableCreate(lua_State* L)
{
	// table.create(arrayLength, keyLength)
	lua_createtable(L, getNumber<int32_t>(L, 1), getNumber<int32_t>(L, 2));
	return 1;
}
// table.pack(...): Lua 5.2-style pack — collects all arguments into a new
// table and records the count in field "n".
int LuaScriptInterface::luaTablePack(lua_State* L)
{
	// table.pack(...)
	int i;
	int n = lua_gettop(L); /* number of elements to pack */
	lua_createtable(L, n, 1); /* create result table */
	lua_insert(L, 1); /* put it at index 1 */
	for (i = n; i >= 1; i--) /* assign elements */
		lua_rawseti(L, 1, i);
	if (luaL_callmeta(L, -1, "__index") != 0) {
		lua_replace(L, -2);
	}
	lua_pushinteger(L, n);
	lua_setfield(L, 1, "n"); /* t.n = number of elements */
	return 1; /* return table */
}
// Game
// Returns an array of creatures able to see `position`, filtered by the
// optional floor/player/range arguments.
int LuaScriptInterface::luaGameGetSpectators(lua_State* L)
{
	// Game.getSpectators(position[, multifloor = false[, onlyPlayer = false[, minRangeX = 0[, maxRangeX = 0[, minRangeY = 0[, maxRangeY = 0]]]]]])
	const Position& position = getPosition(L, 1);
	bool multifloor = getBoolean(L, 2, false);
	bool onlyPlayers = getBoolean(L, 3, false);
	int32_t minRangeX = getNumber<int32_t>(L, 4, 0);
	int32_t maxRangeX = getNumber<int32_t>(L, 5, 0);
	int32_t minRangeY = getNumber<int32_t>(L, 6, 0);
	int32_t maxRangeY = getNumber<int32_t>(L, 7, 0);
	SpectatorVec spectators;
	g_game.map.getSpectators(spectators, position, multifloor, onlyPlayers, minRangeX, maxRangeX, minRangeY, maxRangeY);
	lua_createtable(L, spectators.size(), 0);
	int index = 0;
	for (Creature* creature : spectators) {
		pushUserdata<Creature>(L, creature);
		setCreatureMetatable(L, -1, creature);
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
// Returns an array of all players currently online.
int LuaScriptInterface::luaGameGetPlayers(lua_State* L)
{
	// Game.getPlayers()
	lua_createtable(L, g_game.getPlayersOnline(), 0);
	int index = 0;
	for (const auto& playerEntry : g_game.getPlayers()) {
		pushUserdata<Player>(L, playerEntry.second);
		setMetatable(L, -1, "Player");
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
// Schedules a map load on the dispatcher thread; returns nothing and does NOT
// report success/failure back to Lua (errors only go to stdout).
int LuaScriptInterface::luaGameLoadMap(lua_State* L)
{
	// Game.loadMap(path)
	const std::string& path = getString(L, 1);
	g_dispatcher.addTask(createTask([path]() {
		try {
			g_game.loadMap(path);
		} catch (const std::exception& e) {
			// FIXME: Should only catch some exceptions
			std::cout << "[Error - LuaScriptInterface::luaGameLoadMap] Failed to load map: "
					  << e.what() << std::endl;
		}
	}));
	return 0;
}
// Returns the configured experience multiplier for the given level.
int LuaScriptInterface::luaGameGetExperienceStage(lua_State* L)
{
	// Game.getExperienceStage(level)
	uint32_t level = getNumber<uint32_t>(L, 1);
	lua_pushnumber(L, g_config.getExperienceStage(level));
	return 1;
}
// Returns the total experience required to reach `level` (0 for level 0).
int LuaScriptInterface::luaGameGetExperienceForLevel(lua_State* L)
{
	// Game.getExperienceForLevel(level)
	const uint32_t level = getNumber<uint32_t>(L, 1);
	if (level == 0) {
		lua_pushnumber(L, 0);
	} else {
		lua_pushnumber(L, Player::getExpForLevel(level));
	}
	return 1;
}
// Game.getMonsterCount(): number of monsters currently spawned.
int LuaScriptInterface::luaGameGetMonsterCount(lua_State* L)
{
	// Game.getMonsterCount()
	lua_pushnumber(L, g_game.getMonstersOnline());
	return 1;
}
// Game.getPlayerCount(): number of players currently online.
int LuaScriptInterface::luaGameGetPlayerCount(lua_State* L)
{
	// Game.getPlayerCount()
	lua_pushnumber(L, g_game.getPlayersOnline());
	return 1;
}
// Game.getNpcCount(): number of NPCs currently spawned.
int LuaScriptInterface::luaGameGetNpcCount(lua_State* L)
{
	// Game.getNpcCount()
	lua_pushnumber(L, g_game.getNpcsOnline());
	return 1;
}
// Game.getMonsterTypes(): map of monster name -> MonsterType userdata.
int LuaScriptInterface::luaGameGetMonsterTypes(lua_State* L)
{
	// Game.getMonsterTypes()
	auto& type = g_monsters.monsters;
	lua_createtable(L, type.size(), 0);
	for (auto& mType : type) {
		pushUserdata<MonsterType>(L, &mType.second);
		setMetatable(L, -1, "MonsterType");
		lua_setfield(L, -2, mType.first.c_str());
	}
	return 1;
}
// Game.getTowns(): array of all Town userdata on the map.
int LuaScriptInterface::luaGameGetTowns(lua_State* L)
{
	// Game.getTowns()
	const auto& towns = g_game.map.towns.getTowns();
	lua_createtable(L, towns.size(), 0);
	int index = 0;
	for (auto townEntry : towns) {
		pushUserdata<Town>(L, townEntry.second);
		setMetatable(L, -1, "Town");
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
// Game.getHouses(): array of all House userdata on the map.
int LuaScriptInterface::luaGameGetHouses(lua_State* L)
{
	// Game.getHouses()
	const auto& houses = g_game.map.houses.getHouses();
	lua_createtable(L, houses.size(), 0);
	int index = 0;
	for (auto houseEntry : houses) {
		pushUserdata<House>(L, houseEntry.second);
		setMetatable(L, -1, "House");
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
// Game.getGameState(): current GameState_t value.
int LuaScriptInterface::luaGameGetGameState(lua_State* L)
{
	// Game.getGameState()
	lua_pushnumber(L, g_game.getGameState());
	return 1;
}
// Game.setGameState(state): switches the server state; always returns true.
int LuaScriptInterface::luaGameSetGameState(lua_State* L)
{
	// Game.setGameState(state)
	GameState_t state = getNumber<GameState_t>(L, 1);
	g_game.setGameState(state);
	pushBoolean(L, true);
	return 1;
}
// Game.getWorldType(): current WorldType_t (pvp mode) value.
int LuaScriptInterface::luaGameGetWorldType(lua_State* L)
{
	// Game.getWorldType()
	lua_pushnumber(L, g_game.getWorldType());
	return 1;
}
// Game.setWorldType(type): switches the pvp mode; always returns true.
int LuaScriptInterface::luaGameSetWorldType(lua_State* L)
{
	// Game.setWorldType(type)
	WorldType_t type = getNumber<WorldType_t>(L, 1);
	g_game.setWorldType(type);
	pushBoolean(L, true);
	return 1;
}
// Game.getReturnMessage(value): human-readable text for a ReturnValue code.
int LuaScriptInterface::luaGameGetReturnMessage(lua_State* L)
{
	// Game.getReturnMessage(value)
	ReturnValue value = getNumber<ReturnValue>(L, 1);
	pushString(L, getReturnMessage(value));
	return 1;
}
// Game.getItemAttributeByName(name): item attribute enum for a name string.
int LuaScriptInterface::luaGameGetItemAttributeByName(lua_State* L)
{
	// Game.getItemAttributeByName(name)
	lua_pushnumber(L, stringToItemAttribute(getString(L, 1)));
	return 1;
}
// Creates an item by id or name. With a position it is placed on that tile;
// without one it becomes a temporary item parented to the virtual cylinder
// and is cleaned up with the script environment. Returns the Item userdata,
// or nil on any failure.
int LuaScriptInterface::luaGameCreateItem(lua_State* L)
{
	// Game.createItem(itemId[, count[, position]])
	uint16_t count = getNumber<uint16_t>(L, 2, 1);
	uint16_t id;
	if (isNumber(L, 1)) {
		id = getNumber<uint16_t>(L, 1);
	} else {
		id = Item::items.getItemIdByName(getString(L, 1));
		if (id == 0) {
			lua_pushnil(L);
			return 1;
		}
	}
	const ItemType& it = Item::items[id];
	if (it.stackable) {
		// Clamp to one full stack; the surplus is silently dropped.
		count = std::min<uint16_t>(count, 100);
	}
	Item* item = Item::CreateItem(id, count);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) >= 3) {
		const Position& position = getPosition(L, 3);
		Tile* tile = g_game.map.getTile(position);
		if (!tile) {
			// No tile to receive the item: avoid leaking it.
			delete item;
			lua_pushnil(L);
			return 1;
		}
		g_game.internalAddItem(tile, item, INDEX_WHEREEVER, FLAG_NOLIMIT);
	} else {
		getScriptEnv()->addTempItem(item);
		item->setParent(VirtualCylinder::virtualCylinder);
	}
	pushUserdata<Item>(L, item);
	setItemMetatable(L, -1, item);
	return 1;
}
// Creates a container item by id or name with the given slot count. Placement
// and ownership semantics mirror Game.createItem. Returns Container userdata
// or nil on failure.
int LuaScriptInterface::luaGameCreateContainer(lua_State* L)
{
	// Game.createContainer(itemId, size[, position])
	uint16_t size = getNumber<uint16_t>(L, 2);
	uint16_t id;
	if (isNumber(L, 1)) {
		id = getNumber<uint16_t>(L, 1);
	} else {
		id = Item::items.getItemIdByName(getString(L, 1));
		if (id == 0) {
			lua_pushnil(L);
			return 1;
		}
	}
	Container* container = Item::CreateItemAsContainer(id, size);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) >= 3) {
		const Position& position = getPosition(L, 3);
		Tile* tile = g_game.map.getTile(position);
		if (!tile) {
			// No tile to receive the container: avoid leaking it.
			delete container;
			lua_pushnil(L);
			return 1;
		}
		g_game.internalAddItem(tile, container, INDEX_WHEREEVER, FLAG_NOLIMIT);
	} else {
		getScriptEnv()->addTempItem(container);
		container->setParent(VirtualCylinder::virtualCylinder);
	}
	pushUserdata<Container>(L, container);
	setMetatable(L, -1, "Container");
	return 1;
}
// Spawns a monster by name at a position. The onSpawn event can veto the
// spawn unless `force` is set; placement failure deletes the monster.
// Returns Monster userdata or nil.
int LuaScriptInterface::luaGameCreateMonster(lua_State* L)
{
	// Game.createMonster(monsterName, position[, extended = false[, force = false]])
	Monster* monster = Monster::createMonster(getString(L, 1));
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	const Position& position = getPosition(L, 2);
	bool extended = getBoolean(L, 3, false);
	bool force = getBoolean(L, 4, false);
	// NOTE(review): the onSpawn event runs even when force is true; force only
	// overrides a veto — confirm this ordering is intended.
	if (g_events->eventMonsterOnSpawn(monster, position, false, true) || force) {
		if (g_game.placeCreature(monster, position, extended, force)) {
			pushUserdata<Monster>(L, monster);
			setMetatable(L, -1, "Monster");
		} else {
			delete monster;
			lua_pushnil(L);
		}
	} else {
		delete monster;
		lua_pushnil(L);
	}
	return 1;
}
// Spawns an NPC by name at a position; placement failure deletes the NPC.
// Returns Npc userdata or nil.
int LuaScriptInterface::luaGameCreateNpc(lua_State* L)
{
	// Game.createNpc(npcName, position[, extended = false[, force = false]])
	Npc* npc = Npc::createNpc(getString(L, 1));
	if (!npc) {
		lua_pushnil(L);
		return 1;
	}
	const Position& position = getPosition(L, 2);
	bool extended = getBoolean(L, 3, false);
	bool force = getBoolean(L, 4, false);
	if (g_game.placeCreature(npc, position, extended, force)) {
		pushUserdata<Npc>(L, npc);
		setMetatable(L, -1, "Npc");
	} else {
		delete npc;
		lua_pushnil(L);
	}
	return 1;
}
// Returns the tile at the given coordinates, creating a Static/DynamicTile
// and registering it with the map when none exists yet.
int LuaScriptInterface::luaGameCreateTile(lua_State* L)
{
	// Game.createTile(x, y, z[, isDynamic = false])
	// Game.createTile(position[, isDynamic = false])
	Position position;
	bool isDynamic;
	if (isTable(L, 1)) {
		position = getPosition(L, 1);
		isDynamic = getBoolean(L, 2, false);
	} else {
		position.x = getNumber<uint16_t>(L, 1);
		position.y = getNumber<uint16_t>(L, 2);
		position.z = getNumber<uint16_t>(L, 3);
		isDynamic = getBoolean(L, 4, false);
	}
	Tile* tile = g_game.map.getTile(position);
	if (!tile) {
		if (isDynamic) {
			tile = new DynamicTile(position.x, position.y, position.z);
		} else {
			tile = new StaticTile(position.x, position.y, position.z);
		}
		// The map takes ownership of the newly created tile.
		g_game.map.setTile(position, tile);
	}
	pushUserdata(L, tile);
	setMetatable(L, -1, "Tile");
	return 1;
}
// Creates (or resets for re-registration) a MonsterType. Only allowed from the
// data/scripts interface. Returns MonsterType userdata or nil.
int LuaScriptInterface::luaGameCreateMonsterType(lua_State* L)
{
	// Game.createMonsterType(name)
	if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
		reportErrorFunc(L, "MonsterTypes can only be registered in the Scripts interface.");
		lua_pushnil(L);
		return 1;
	}
	const std::string& name = getString(L, 1);
	if (name.length() == 0) {
		lua_pushnil(L);
		return 1;
	}
	MonsterType* monsterType = g_monsters.getMonsterType(name, false);
	if (!monsterType) {
		// New type: operator[] inserts a default entry keyed by lowercase name.
		monsterType = &g_monsters.monsters[asLowerCaseString(name)];
		monsterType->name = name;
		monsterType->nameDescription = "a " + name;
	} else {
		// Existing type (script reload): wipe the pieces the script will
		// re-register so they are not duplicated.
		monsterType->info.lootItems.clear();
		monsterType->info.attackSpells.clear();
		monsterType->info.defenseSpells.clear();
		monsterType->info.scripts.clear();
		monsterType->info.thinkEvent = -1;
		monsterType->info.creatureAppearEvent = -1;
		monsterType->info.creatureDisappearEvent = -1;
		monsterType->info.creatureMoveEvent = -1;
		monsterType->info.creatureSayEvent = -1;
	}
	pushUserdata<MonsterType>(L, monsterType);
	setMetatable(L, -1, "MonsterType");
	return 1;
}
// Starts a named raid; returns a ReturnValue code (no-such-raid, already
// running, or no-error).
int LuaScriptInterface::luaGameStartRaid(lua_State* L)
{
	// Game.startRaid(raidName)
	const std::string& raidName = getString(L, 1);
	Raid* raid = g_game.raids.getRaidByName(raidName);
	if (!raid || !raid->isLoaded()) {
		lua_pushnumber(L, RETURNVALUE_NOSUCHRAIDEXISTS);
		return 1;
	}
	// Only one raid may run at a time.
	if (g_game.raids.getRunning()) {
		lua_pushnumber(L, RETURNVALUE_ANOTHERRAIDISALREADYEXECUTING);
		return 1;
	}
	g_game.raids.setRunning(raid);
	raid->startRaid();
	lua_pushnumber(L, RETURNVALUE_NOERROR);
	return 1;
}
// Returns {min, max, string} describing the supported client protocol versions.
int LuaScriptInterface::luaGameGetClientVersion(lua_State* L)
{
	// Game.getClientVersion()
	lua_createtable(L, 0, 3);
	setField(L, "min", CLIENT_VERSION_MIN);
	setField(L, "max", CLIENT_VERSION_MAX);
	setField(L, "string", CLIENT_VERSION_STR);
	return 1;
}
// Game.reload(reloadType): reloads the requested subsystem and returns a
// single boolean success value, then runs a full Lua GC cycle.
int LuaScriptInterface::luaGameReload(lua_State* L)
{
	// Game.reload(reloadType)
	ReloadTypes_t reloadType = getNumber<ReloadTypes_t>(L, 1);
	if (reloadType == RELOAD_TYPE_GLOBAL) {
		// BUG FIX: the previous code pushed two booleans here but the function
		// returns 1, so the global.lua result was silently discarded. Run both
		// reloads unconditionally and return the combined result.
		bool globalOk = g_luaEnvironment.loadFile("data/global.lua") == 0;
		bool libOk = g_scripts->loadScripts("scripts/lib", true, true);
		pushBoolean(L, globalOk && libOk);
	} else {
		pushBoolean(L, g_game.reload(reloadType));
	}
	lua_gc(g_luaEnvironment.getLuaState(), LUA_GCCOLLECT, 0);
	return 1;
}
// Game.getAccountStorageValue(accountId, key): stored value for the account.
int LuaScriptInterface::luaGameGetAccountStorageValue(lua_State* L)
{
	// Game.getAccountStorageValue(accountId, key)
	uint32_t accountId = getNumber<uint32_t>(L, 1);
	uint32_t key = getNumber<uint32_t>(L, 2);
	lua_pushnumber(L, g_game.getAccountStorageValue(accountId, key));
	return 1;
}
// Game.setAccountStorageValue(accountId, key, value): always returns true.
int LuaScriptInterface::luaGameSetAccountStorageValue(lua_State* L)
{
	// Game.setAccountStorageValue(accountId, key, value)
	uint32_t accountId = getNumber<uint32_t>(L, 1);
	uint32_t key = getNumber<uint32_t>(L, 2);
	int32_t value = getNumber<int32_t>(L, 3);
	g_game.setAccountStorageValue(accountId, key, value);
	// Consistency: use the file's pushBoolean helper instead of raw
	// lua_pushboolean (behavior is identical).
	pushBoolean(L, true);
	return 1;
}
// Game.saveAccountStorageValues(): persists all account storage; returns
// whether the save succeeded.
int LuaScriptInterface::luaGameSaveAccountStorageValues(lua_State* L)
{
	// Game.saveAccountStorageValues()
	pushBoolean(L, g_game.saveAccountStorageValues());
	return 1;
}
// Variant
// Variant(value): builds a LuaVariant from a thing userdata (target position),
// a position table, a number, or a string. Unrecognized input leaves the
// variant at its default type.
int LuaScriptInterface::luaVariantCreate(lua_State* L)
{
	// Variant(number or string or position or thing)
	LuaVariant variant;
	if (isUserdata(L, 2)) {
		if (Thing* thing = getThing(L, 2)) {
			variant.type = VARIANT_TARGETPOSITION;
			variant.pos = thing->getPosition();
		}
	} else if (isTable(L, 2)) {
		variant.type = VARIANT_POSITION;
		variant.pos = getPosition(L, 2);
	} else if (isNumber(L, 2)) {
		variant.type = VARIANT_NUMBER;
		variant.number = getNumber<uint32_t>(L, 2);
	} else if (isString(L, 2)) {
		variant.type = VARIANT_STRING;
		variant.text = getString(L, 2);
	}
	pushVariant(L, variant);
	return 1;
}
// Variant:getNumber(): the stored number, or 0 for non-number variants.
int LuaScriptInterface::luaVariantGetNumber(lua_State* L)
{
	// Variant:getNumber()
	const LuaVariant& variant = getVariant(L, 1);
	if (variant.type == VARIANT_NUMBER) {
		lua_pushnumber(L, variant.number);
	} else {
		lua_pushnumber(L, 0);
	}
	return 1;
}
// Variant:getString(): the stored text, or "" for non-string variants.
int LuaScriptInterface::luaVariantGetString(lua_State* L)
{
	// Variant:getString()
	const LuaVariant& variant = getVariant(L, 1);
	if (variant.type == VARIANT_STRING) {
		pushString(L, variant.text);
	} else {
		pushString(L, std::string());
	}
	return 1;
}
// Variant:getPosition(): the stored position, or a default Position for
// variants that carry no position.
int LuaScriptInterface::luaVariantGetPosition(lua_State* L)
{
	// Variant:getPosition()
	const LuaVariant& variant = getVariant(L, 1);
	if (variant.type == VARIANT_POSITION || variant.type == VARIANT_TARGETPOSITION) {
		pushPosition(L, variant.pos);
	} else {
		pushPosition(L, Position());
	}
	return 1;
}
// Position
// Position constructor: copies an existing position table (preserving its
// stackpos) or builds one from x/y/z/stackpos numbers, all defaulting to 0.
int LuaScriptInterface::luaPositionCreate(lua_State* L)
{
	// Position([x = 0[, y = 0[, z = 0[, stackpos = 0]]]])
	// Position([position])
	if (lua_gettop(L) <= 1) {
		pushPosition(L, Position());
		return 1;
	}
	int32_t stackpos;
	if (isTable(L, 2)) {
		// getPosition fills in stackpos from the table.
		const Position& position = getPosition(L, 2, stackpos);
		pushPosition(L, position, stackpos);
	} else {
		uint16_t x = getNumber<uint16_t>(L, 2, 0);
		uint16_t y = getNumber<uint16_t>(L, 3, 0);
		uint8_t z = getNumber<uint8_t>(L, 4, 0);
		stackpos = getNumber<int32_t>(L, 5, 0);
		pushPosition(L, Position(x, y, z), stackpos);
	}
	return 1;
}
// __add metamethod: component-wise sum. The result carries a stackpos taken
// from whichever operand has a non-zero one (left operand wins).
int LuaScriptInterface::luaPositionAdd(lua_State* L)
{
	// positionValue = position + positionEx
	int32_t stackpos;
	const Position& position = getPosition(L, 1, stackpos);
	Position positionEx;
	if (stackpos == 0) {
		positionEx = getPosition(L, 2, stackpos);
	} else {
		positionEx = getPosition(L, 2);
	}
	pushPosition(L, position + positionEx, stackpos);
	return 1;
}
// __sub metamethod: component-wise difference; stackpos handling as in __add.
int LuaScriptInterface::luaPositionSub(lua_State* L)
{
	// positionValue = position - positionEx
	int32_t stackpos;
	const Position& position = getPosition(L, 1, stackpos);
	Position positionEx;
	if (stackpos == 0) {
		positionEx = getPosition(L, 2, stackpos);
	} else {
		positionEx = getPosition(L, 2);
	}
	pushPosition(L, position - positionEx, stackpos);
	return 1;
}
// __eq metamethod: coordinate equality (stackpos is not compared here —
// Position::operator== defines the semantics).
int LuaScriptInterface::luaPositionCompare(lua_State* L)
{
	// position == positionEx
	const Position& positionEx = getPosition(L, 2);
	const Position& position = getPosition(L, 1);
	pushBoolean(L, position == positionEx);
	return 1;
}
int LuaScriptInterface::luaPositionGetDistance(lua_State* L)
{
	// position:getDistance(positionEx)
	// Chebyshev distance: the largest absolute offset along any single axis.
	const Position& positionEx = getPosition(L, 2);
	const Position& position = getPosition(L, 1);
	int32_t distanceX = std::abs(Position::getDistanceX(position, positionEx));
	int32_t distanceY = std::abs(Position::getDistanceY(position, positionEx));
	int32_t distanceZ = std::abs(Position::getDistanceZ(position, positionEx));
	lua_pushnumber(L, std::max<int32_t>(std::max<int32_t>(distanceX, distanceY), distanceZ));
	return 1;
}
// position:isSightClear(positionEx[, sameFloor = true]): line-of-sight test
// between the two positions.
int LuaScriptInterface::luaPositionIsSightClear(lua_State* L)
{
	// position:isSightClear(positionEx[, sameFloor = true])
	bool sameFloor = getBoolean(L, 3, true);
	const Position& positionEx = getPosition(L, 2);
	const Position& position = getPosition(L, 1);
	pushBoolean(L, g_game.isSightClear(position, positionEx, sameFloor));
	return 1;
}
// Shows a magic effect at the position, either to one specific player or to
// everyone who can see the tile; always returns true.
int LuaScriptInterface::luaPositionSendMagicEffect(lua_State* L)
{
	// position:sendMagicEffect(magicEffect[, player = nullptr])
	SpectatorVec spectators;
	if (lua_gettop(L) >= 3) {
		Player* player = getPlayer(L, 3);
		if (player) {
			spectators.emplace_back(player);
		}
	}
	MagicEffectClasses magicEffect = getNumber<MagicEffectClasses>(L, 2);
	const Position& position = getPosition(L, 1);
	if (!spectators.empty()) {
		Game::addMagicEffect(spectators, position, magicEffect);
	} else {
		g_game.addMagicEffect(position, magicEffect);
	}
	pushBoolean(L, true);
	return 1;
}
// Shows a projectile effect from this position to positionEx, optionally to a
// single player only; always returns true.
int LuaScriptInterface::luaPositionSendDistanceEffect(lua_State* L)
{
	// position:sendDistanceEffect(positionEx, distanceEffect[, player = nullptr])
	SpectatorVec spectators;
	if (lua_gettop(L) >= 4) {
		Player* player = getPlayer(L, 4);
		if (player) {
			spectators.emplace_back(player);
		}
	}
	ShootType_t distanceEffect = getNumber<ShootType_t>(L, 3);
	const Position& positionEx = getPosition(L, 2);
	const Position& position = getPosition(L, 1);
	if (!spectators.empty()) {
		Game::addDistanceEffect(spectators, position, positionEx, distanceEffect);
	} else {
		g_game.addDistanceEffect(position, positionEx, distanceEffect);
	}
	pushBoolean(L, true);
	return 1;
}
// Tile
// Tile constructor: looks up an existing map tile by position table or by
// x/y/z numbers. Unlike Game.createTile this never creates a tile; returns
// nil when none exists.
int LuaScriptInterface::luaTileCreate(lua_State* L)
{
	// Tile(x, y, z)
	// Tile(position)
	Tile* tile;
	if (isTable(L, 2)) {
		tile = g_game.map.getTile(getPosition(L, 2));
	} else {
		uint8_t z = getNumber<uint8_t>(L, 4);
		uint16_t y = getNumber<uint16_t>(L, 3);
		uint16_t x = getNumber<uint16_t>(L, 2);
		tile = g_game.map.getTile(x, y, z);
	}
	if (tile) {
		pushUserdata<Tile>(L, tile);
		setMetatable(L, -1, "Tile");
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// tile:remove(): removes this tile from the map; nil for invalid userdata.
int LuaScriptInterface::luaTileRemove(lua_State* L)
{
	// tile:remove()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	g_game.map.removeTile(tile->getPosition());
	pushBoolean(L, true);
	return 1;
}
// tile:getPosition(): the tile's map position; nil for invalid userdata.
int LuaScriptInterface::luaTileGetPosition(lua_State* L)
{
	// tile:getPosition()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (tile) {
		pushPosition(L, tile->getPosition());
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// tile:getGround(): the ground item, or nil when absent/invalid.
int LuaScriptInterface::luaTileGetGround(lua_State* L)
{
	// tile:getGround()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (tile && tile->getGround()) {
		pushUserdata<Item>(L, tile->getGround());
		setItemMetatable(L, -1, tile->getGround());
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// tile:getThing(index): the creature or item at the given stack index, with
// the matching metatable; nil when the tile or slot is empty.
int LuaScriptInterface::luaTileGetThing(lua_State* L)
{
	// tile:getThing(index)
	int32_t index = getNumber<int32_t>(L, 2);
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	Thing* thing = tile->getThing(index);
	if (!thing) {
		lua_pushnil(L);
		return 1;
	}
	if (Creature* creature = thing->getCreature()) {
		pushUserdata<Creature>(L, creature);
		setCreatureMetatable(L, -1, creature);
	} else if (Item* item = thing->getItem()) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// tile:getThingCount(): number of things stacked on the tile.
int LuaScriptInterface::luaTileGetThingCount(lua_State* L)
{
	// tile:getThingCount()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (tile) {
		lua_pushnumber(L, tile->getThingCount());
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// tile:getTopVisibleThing(creature): topmost thing visible to `creature`,
// pushed as creature or item userdata; nil when nothing is visible.
int LuaScriptInterface::luaTileGetTopVisibleThing(lua_State* L)
{
	// tile:getTopVisibleThing(creature)
	Creature* creature = getCreature(L, 2);
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	Thing* thing = tile->getTopVisibleThing(creature);
	if (!thing) {
		lua_pushnil(L);
		return 1;
	}
	if (Creature* visibleCreature = thing->getCreature()) {
		pushUserdata<Creature>(L, visibleCreature);
		setCreatureMetatable(L, -1, visibleCreature);
	} else if (Item* visibleItem = thing->getItem()) {
		pushUserdata<Item>(L, visibleItem);
		setItemMetatable(L, -1, visibleItem);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// tile:getTopTopItem(): topmost always-on-top item, or nil.
int LuaScriptInterface::luaTileGetTopTopItem(lua_State* L)
{
	// tile:getTopTopItem()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	Item* item = tile->getTopTopItem();
	if (item) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// tile:getTopDownItem(): topmost regular (down) item, or nil.
int LuaScriptInterface::luaTileGetTopDownItem(lua_State* L)
{
	// tile:getTopDownItem()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	Item* item = tile->getTopDownItem();
	if (item) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// tile:getFieldItem(): the field item (fire/energy/poison etc.), or nil.
int LuaScriptInterface::luaTileGetFieldItem(lua_State* L)
{
	// tile:getFieldItem()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	Item* item = tile->getFieldItem();
	if (item) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// tile:getItemById(itemId[, subType = -1]): first matching item on the tile
// (item id may be given by number or by name); nil when not found.
int LuaScriptInterface::luaTileGetItemById(lua_State* L)
{
	// tile:getItemById(itemId[, subType = -1])
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			lua_pushnil(L);
			return 1;
		}
	}
	int32_t subType = getNumber<int32_t>(L, 3, -1);
	Item* item = g_game.findItemOfType(tile, itemId, false, subType);
	if (item) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// tile:getItemByType(itemType): first item on the tile whose ItemType matches.
// Uses the tile's flags as a fast pre-check for the types that set one; falls
// through to scanning the ground item and the item list. Returns nil when no
// item matches.
int LuaScriptInterface::luaTileGetItemByType(lua_State* L)
{
	// tile:getItemByType(itemType)
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	bool found;
	ItemTypes_t itemType = getNumber<ItemTypes_t>(L, 2);
	switch (itemType) {
		case ITEM_TYPE_TELEPORT:
			found = tile->hasFlag(TILESTATE_TELEPORT);
			break;
		case ITEM_TYPE_MAGICFIELD:
			found = tile->hasFlag(TILESTATE_MAGICFIELD);
			break;
		case ITEM_TYPE_MAILBOX:
			found = tile->hasFlag(TILESTATE_MAILBOX);
			break;
		case ITEM_TYPE_TRASHHOLDER:
			found = tile->hasFlag(TILESTATE_TRASHHOLDER);
			break;
		case ITEM_TYPE_BED:
			found = tile->hasFlag(TILESTATE_BED);
			break;
		case ITEM_TYPE_DEPOT:
			found = tile->hasFlag(TILESTATE_DEPOT);
			break;
		default:
			// Types without a dedicated tile flag: always scan.
			found = true;
			break;
	}
	if (!found) {
		lua_pushnil(L);
		return 1;
	}
	if (Item* item = tile->getGround()) {
		const ItemType& it = Item::items[item->getID()];
		if (it.type == itemType) {
			pushUserdata<Item>(L, item);
			setItemMetatable(L, -1, item);
			return 1;
		}
	}
	if (const TileItemVector* items = tile->getItemList()) {
		for (Item* item : *items) {
			const ItemType& it = Item::items[item->getID()];
			if (it.type == itemType) {
				pushUserdata<Item>(L, item);
				setItemMetatable(L, -1, item);
				return 1;
			}
		}
	}
	lua_pushnil(L);
	return 1;
}
int LuaScriptInterface::luaTileGetItemByTopOrder(lua_State* L)
{
	// tile:getItemByTopOrder(topOrder)
	// Pushes the first item with the given stacking top-order, or nil.
	Tile* tile = getUserdata<Tile>(L, 1);
	if (tile) {
		int32_t topOrder = getNumber<int32_t>(L, 2);
		if (Item* ordered = tile->getItemByTopOrder(topOrder)) {
			pushUserdata<Item>(L, ordered);
			setItemMetatable(L, -1, ordered);
			return 1;
		}
	}
	lua_pushnil(L);
	return 1;
}
int LuaScriptInterface::luaTileGetItemCountById(lua_State* L)
{
	// tile:getItemCountById(itemId[, subType = -1])
	// Counts items of the given id (or name) on the tile; -1 matches any
	// sub-type.
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	int32_t subType = getNumber<int32_t>(L, 3, -1);
	// Accept either a numeric item id or an item name.
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			// Unknown item name.
			lua_pushnil(L);
			return 1;
		}
	}
	lua_pushnumber(L, tile->getItemTypeCount(itemId, subType));
	return 1;
}
int LuaScriptInterface::luaTileGetBottomCreature(lua_State* L)
{
	// tile:getBottomCreature()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	if (const Creature* bottom = tile->getBottomCreature()) {
		pushUserdata<const Creature>(L, bottom);
		setCreatureMetatable(L, -1, bottom);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaTileGetTopCreature(lua_State* L)
{
	// tile:getTopCreature()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	if (Creature* top = tile->getTopCreature()) {
		pushUserdata<Creature>(L, top);
		setCreatureMetatable(L, -1, top);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaTileGetBottomVisibleCreature(lua_State* L)
{
	// tile:getBottomVisibleCreature(creature)
	// Bottom-most creature on the tile that is visible to `creature`.
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	Creature* viewer = getCreature(L, 2);
	if (!viewer) {
		lua_pushnil(L);
		return 1;
	}
	if (const Creature* visible = tile->getBottomVisibleCreature(viewer)) {
		pushUserdata<const Creature>(L, visible);
		setCreatureMetatable(L, -1, visible);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaTileGetTopVisibleCreature(lua_State* L)
{
	// tile:getTopVisibleCreature(creature)
	// Top-most creature on the tile that is visible to `creature`.
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	Creature* viewer = getCreature(L, 2);
	if (!viewer) {
		lua_pushnil(L);
		return 1;
	}
	if (Creature* visible = tile->getTopVisibleCreature(viewer)) {
		pushUserdata<Creature>(L, visible);
		setCreatureMetatable(L, -1, visible);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaTileGetItems(lua_State* L)
{
	// tile:getItems()
	// Returns an array table of all item userdata on the tile, or nil.
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	TileItemVector* items = tile->getItemList();
	if (!items) {
		lua_pushnil(L);
		return 1;
	}
	lua_createtable(L, items->size(), 0);
	int i = 0;
	for (Item* item : *items) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
		lua_rawseti(L, -2, ++i);
	}
	return 1;
}
int LuaScriptInterface::luaTileGetItemCount(lua_State* L)
{
	// tile:getItemCount()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (tile) {
		lua_pushnumber(L, tile->getItemCount());
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaTileGetDownItemCount(lua_State* L)
{
	// tile:getDownItemCount()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, tile->getDownItemCount());
	return 1;
}
int LuaScriptInterface::luaTileGetTopItemCount(lua_State* L)
{
	// tile:getTopItemCount()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (tile) {
		lua_pushnumber(L, tile->getTopItemCount());
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaTileGetCreatures(lua_State* L)
{
	// tile:getCreatures()
	// Returns an array table of all creature userdata on the tile, or nil.
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	CreatureVector* creatures = tile->getCreatures();
	if (!creatures) {
		lua_pushnil(L);
		return 1;
	}
	lua_createtable(L, creatures->size(), 0);
	int i = 0;
	for (Creature* creature : *creatures) {
		pushUserdata<Creature>(L, creature);
		setCreatureMetatable(L, -1, creature);
		lua_rawseti(L, -2, ++i);
	}
	return 1;
}
int LuaScriptInterface::luaTileGetCreatureCount(lua_State* L)
{
	// tile:getCreatureCount()
	Tile* tile = getUserdata<Tile>(L, 1);
	if (tile) {
		lua_pushnumber(L, tile->getCreatureCount());
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaTileHasProperty(lua_State* L)
{
	// tile:hasProperty(property[, item])
	// With an item argument the property is tested as if that item were
	// on the tile; otherwise the tile itself is tested.
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	Item* item = lua_gettop(L) >= 3 ? getUserdata<Item>(L, 3) : nullptr;
	ITEMPROPERTY property = getNumber<ITEMPROPERTY>(L, 2);
	if (item) {
		pushBoolean(L, tile->hasProperty(item, property));
	} else {
		pushBoolean(L, tile->hasProperty(property));
	}
	return 1;
}
int LuaScriptInterface::luaTileGetThingIndex(lua_State* L)
{
	// tile:getThingIndex(thing)
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	if (Thing* thing = getThing(L, 2)) {
		lua_pushnumber(L, tile->getThingIndex(thing));
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaTileHasFlag(lua_State* L)
{
	// tile:hasFlag(flag)
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	tileflags_t flag = getNumber<tileflags_t>(L, 2);
	pushBoolean(L, tile->hasFlag(flag));
	return 1;
}
int LuaScriptInterface::luaTileQueryAdd(lua_State* L)
{
	// tile:queryAdd(thing[, flags])
	// Pushes the ReturnValue of testing whether `thing` could be added.
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	Thing* thing = getThing(L, 2);
	if (!thing) {
		lua_pushnil(L);
		return 1;
	}
	uint32_t flags = getNumber<uint32_t>(L, 3, 0);
	lua_pushnumber(L, tile->queryAdd(0, *thing, 1, flags));
	return 1;
}
int LuaScriptInterface::luaTileAddItem(lua_State* L)
{
	// tile:addItem(itemId[, count/subType = 1[, flags = 0]])
	// Creates a new item and places it on the tile. Pushes the created item
	// userdata on success, nil on failure.
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	// Accept either a numeric item id or an item name.
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			// Unknown item name.
			lua_pushnil(L);
			return 1;
		}
	}
	// Count/sub-type is clamped to 100 (one stack) before creation.
	uint32_t subType = getNumber<uint32_t>(L, 3, 1);
	Item* item = Item::CreateItem(itemId, std::min<uint32_t>(subType, 100));
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	uint32_t flags = getNumber<uint32_t>(L, 4, 0);
	ReturnValue ret = g_game.internalAddItem(tile, item, INDEX_WHEREEVER, flags);
	if (ret == RETURNVALUE_NOERROR) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
	} else {
		// Ownership was never transferred to the map; free to avoid a leak.
		delete item;
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaTileAddItemEx(lua_State* L)
{
	// tile:addItemEx(item[, flags = 0])
	// Places an existing (virtual, unplaced) item userdata on the tile and
	// pushes the ReturnValue of the operation.
	Item* item = getUserdata<Item>(L, 2);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	// Only items parented to the virtual cylinder may be placed this way.
	if (item->getParent() != VirtualCylinder::virtualCylinder) {
		reportErrorFunc(L, "Item already has a parent");
		lua_pushnil(L);
		return 1;
	}
	uint32_t flags = getNumber<uint32_t>(L, 3, 0);
	ReturnValue ret = g_game.internalAddItem(tile, item, INDEX_WHEREEVER, flags);
	if (ret == RETURNVALUE_NOERROR) {
		// The map now owns the item; stop tracking it as a script temporary.
		ScriptEnvironment::removeTempItem(item);
	}
	lua_pushnumber(L, ret);
	return 1;
}
int LuaScriptInterface::luaTileGetHouse(lua_State* L)
{
	// tile:getHouse()
	// Pushes the owning House if the tile is a house tile, else nil.
	Tile* tile = getUserdata<Tile>(L, 1);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	HouseTile* houseTile = dynamic_cast<HouseTile*>(tile);
	if (!houseTile) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<House>(L, houseTile->getHouse());
	setMetatable(L, -1, "House");
	return 1;
}
// NetworkMessage
int LuaScriptInterface::luaNetworkMessageCreate(lua_State* L)
{
	// NetworkMessage()
	// Constructs an empty network message userdata.
	NetworkMessage* message = new NetworkMessage;
	pushUserdata<NetworkMessage>(L, message);
	setMetatable(L, -1, "NetworkMessage");
	return 1;
}
int LuaScriptInterface::luaNetworkMessageDelete(lua_State* L)
{
	// __gc metamethod: frees the underlying NetworkMessage exactly once.
	NetworkMessage** messagePtr = getRawUserdata<NetworkMessage>(L, 1);
	if (!messagePtr || !*messagePtr) {
		return 0;
	}
	delete *messagePtr;
	*messagePtr = nullptr;
	return 0;
}
int LuaScriptInterface::luaNetworkMessageGetByte(lua_State* L)
{
	// networkMessage:getByte()
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, msg->getByte());
	return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU16(lua_State* L)
{
	// networkMessage:getU16()
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, msg->get<uint16_t>());
	return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU32(lua_State* L)
{
	// networkMessage:getU32()
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, msg->get<uint32_t>());
	return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU64(lua_State* L)
{
	// networkMessage:getU64()
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, msg->get<uint64_t>());
	return 1;
}
int LuaScriptInterface::luaNetworkMessageGetString(lua_State* L)
{
	// networkMessage:getString()
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, msg->getString());
	return 1;
}
int LuaScriptInterface::luaNetworkMessageGetPosition(lua_State* L)
{
	// networkMessage:getPosition()
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	pushPosition(L, msg->getPosition());
	return 1;
}
int LuaScriptInterface::luaNetworkMessageAddByte(lua_State* L)
{
	// networkMessage:addByte(number)
	uint8_t value = getNumber<uint8_t>(L, 2);
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	msg->addByte(value);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU16(lua_State* L)
{
	// networkMessage:addU16(number)
	uint16_t value = getNumber<uint16_t>(L, 2);
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	msg->add<uint16_t>(value);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU32(lua_State* L)
{
	// networkMessage:addU32(number)
	uint32_t value = getNumber<uint32_t>(L, 2);
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	msg->add<uint32_t>(value);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU64(lua_State* L)
{
	// networkMessage:addU64(number)
	uint64_t value = getNumber<uint64_t>(L, 2);
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	msg->add<uint64_t>(value);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageAddString(lua_State* L)
{
	// networkMessage:addString(string)
	const std::string& value = getString(L, 2);
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	msg->addString(value);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageAddPosition(lua_State* L)
{
	// networkMessage:addPosition(position)
	const Position& pos = getPosition(L, 2);
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	msg->addPosition(pos);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageAddDouble(lua_State* L)
{
	// networkMessage:addDouble(number)
	double value = getNumber<double>(L, 2);
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	msg->addDouble(value);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageAddItem(lua_State* L)
{
	// networkMessage:addItem(item)
	Item* item = getUserdata<Item>(L, 2);
	if (!item) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	msg->addItem(item);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageAddItemId(lua_State* L)
{
	// networkMessage:addItemId(itemId)
	// Writes an item id to the message; accepts an id number or item name.
	NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
	if (!message) {
		lua_pushnil(L);
		return 1;
	}
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			// Unknown item name.
			lua_pushnil(L);
			return 1;
		}
	}
	message->addItemId(itemId);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageReset(lua_State* L)
{
	// networkMessage:reset()
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	msg->reset();
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageSeek(lua_State* L)
{
	// networkMessage:seek(position)
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg || !isNumber(L, 2)) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, msg->setBufferPosition(getNumber<uint16_t>(L, 2)));
	return 1;
}
int LuaScriptInterface::luaNetworkMessageTell(lua_State* L)
{
	// networkMessage:tell()
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	// Position relative to the initial (header-reserved) buffer offset.
	lua_pushnumber(L, msg->getBufferPosition() - msg->INITIAL_BUFFER_POSITION);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageLength(lua_State* L)
{
	// networkMessage:len()
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, msg->getLength());
	return 1;
}
int LuaScriptInterface::luaNetworkMessageSkipBytes(lua_State* L)
{
	// networkMessage:skipBytes(number)
	int16_t count = getNumber<int16_t>(L, 2);
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	msg->skipBytes(count);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaNetworkMessageSendToPlayer(lua_State* L)
{
	// networkMessage:sendToPlayer(player)
	NetworkMessage* msg = getUserdata<NetworkMessage>(L, 1);
	if (!msg) {
		lua_pushnil(L);
		return 1;
	}
	Player* player = getPlayer(L, 2);
	if (!player) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}
	player->sendNetworkMessage(*msg);
	pushBoolean(L, true);
	return 1;
}
// ModalWindow
int LuaScriptInterface::luaModalWindowCreate(lua_State* L)
{
	// ModalWindow(id, title, message)
	// Arguments are read from the top of the stack downwards.
	const std::string& message = getString(L, 4);
	const std::string& title = getString(L, 3);
	uint32_t id = getNumber<uint32_t>(L, 2);
	ModalWindow* window = new ModalWindow(id, title, message);
	pushUserdata<ModalWindow>(L, window);
	setMetatable(L, -1, "ModalWindow");
	return 1;
}
int LuaScriptInterface::luaModalWindowDelete(lua_State* L)
{
	// __gc metamethod: frees the underlying ModalWindow exactly once.
	ModalWindow** windowPtr = getRawUserdata<ModalWindow>(L, 1);
	if (!windowPtr || !*windowPtr) {
		return 0;
	}
	delete *windowPtr;
	*windowPtr = nullptr;
	return 0;
}
int LuaScriptInterface::luaModalWindowGetId(lua_State* L)
{
	// modalWindow:getId()
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, win->id);
	return 1;
}
int LuaScriptInterface::luaModalWindowGetTitle(lua_State* L)
{
	// modalWindow:getTitle()
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, win->title);
	return 1;
}
int LuaScriptInterface::luaModalWindowGetMessage(lua_State* L)
{
	// modalWindow:getMessage()
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, win->message);
	return 1;
}
int LuaScriptInterface::luaModalWindowSetTitle(lua_State* L)
{
	// modalWindow:setTitle(text)
	const std::string& text = getString(L, 2);
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	win->title = text;
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaModalWindowSetMessage(lua_State* L)
{
	// modalWindow:setMessage(text)
	const std::string& text = getString(L, 2);
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	win->message = text;
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaModalWindowGetButtonCount(lua_State* L)
{
	// modalWindow:getButtonCount()
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, win->buttons.size());
	return 1;
}
int LuaScriptInterface::luaModalWindowGetChoiceCount(lua_State* L)
{
	// modalWindow:getChoiceCount()
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, win->choices.size());
	return 1;
}
int LuaScriptInterface::luaModalWindowAddButton(lua_State* L)
{
	// modalWindow:addButton(id, text)
	const std::string& text = getString(L, 3);
	uint8_t id = getNumber<uint8_t>(L, 2);
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	win->buttons.emplace_back(text, id);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaModalWindowAddChoice(lua_State* L)
{
	// modalWindow:addChoice(id, text)
	const std::string& text = getString(L, 3);
	uint8_t id = getNumber<uint8_t>(L, 2);
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	win->choices.emplace_back(text, id);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaModalWindowGetDefaultEnterButton(lua_State* L)
{
	// modalWindow:getDefaultEnterButton()
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, win->defaultEnterButton);
	return 1;
}
int LuaScriptInterface::luaModalWindowSetDefaultEnterButton(lua_State* L)
{
	// modalWindow:setDefaultEnterButton(buttonId)
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	win->defaultEnterButton = getNumber<uint8_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaModalWindowGetDefaultEscapeButton(lua_State* L)
{
	// modalWindow:getDefaultEscapeButton()
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, win->defaultEscapeButton);
	return 1;
}
int LuaScriptInterface::luaModalWindowSetDefaultEscapeButton(lua_State* L)
{
	// modalWindow:setDefaultEscapeButton(buttonId)
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	win->defaultEscapeButton = getNumber<uint8_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaModalWindowHasPriority(lua_State* L)
{
	// modalWindow:hasPriority()
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, win->priority);
	return 1;
}
int LuaScriptInterface::luaModalWindowSetPriority(lua_State* L)
{
	// modalWindow:setPriority(priority)
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	win->priority = getBoolean(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaModalWindowSendToPlayer(lua_State* L)
{
	// modalWindow:sendToPlayer(player)
	Player* player = getPlayer(L, 2);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	ModalWindow* win = getUserdata<ModalWindow>(L, 1);
	if (!win) {
		lua_pushnil(L);
		return 1;
	}
	// Do not send a duplicate of a window id already open for this player.
	if (!player->hasModalWindowOpen(win->id)) {
		player->sendModalWindow(*win);
	}
	pushBoolean(L, true);
	return 1;
}
// Item
int LuaScriptInterface::luaItemCreate(lua_State* L)
{
	// Item(uid)
	// Looks up an item by its script-environment uid.
	uint32_t uid = getNumber<uint32_t>(L, 2);
	Item* item = getScriptEnv()->getItemByUID(uid);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Item>(L, item);
	setItemMetatable(L, -1, item);
	return 1;
}
int LuaScriptInterface::luaItemIsItem(lua_State* L)
{
	// item:isItem()
	const Item* item = getUserdata<const Item>(L, 1);
	pushBoolean(L, item != nullptr);
	return 1;
}
int LuaScriptInterface::luaItemGetParent(lua_State* L)
{
	// item:getParent()
	Item* item = getUserdata<Item>(L, 1);
	Cylinder* parent = item ? item->getParent() : nullptr;
	if (parent) {
		pushCylinder(L, parent);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaItemGetTopParent(lua_State* L)
{
	// item:getTopParent()
	Item* item = getUserdata<Item>(L, 1);
	Cylinder* topParent = item ? item->getTopParent() : nullptr;
	if (topParent) {
		pushCylinder(L, topParent);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaItemGetId(lua_State* L)
{
	// item:getId()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, item->getID());
	return 1;
}
int LuaScriptInterface::luaItemClone(lua_State* L)
{
	// item:clone()
	// Copies the item into a new temporary item that is parented to the
	// virtual cylinder (i.e. not yet placed anywhere in the world).
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	Item* clone = item->clone();
	if (!clone) {
		lua_pushnil(L);
		return 1;
	}
	// Track as a temp item so it is cleaned up if the script never places it.
	getScriptEnv()->addTempItem(clone);
	clone->setParent(VirtualCylinder::virtualCylinder);
	pushUserdata<Item>(L, clone);
	setItemMetatable(L, -1, clone);
	return 1;
}
int LuaScriptInterface::luaItemSplit(lua_State* L)
{
	// item:split([count = 1])
	// Splits `count` units off a stackable item. Pushes the new (temporary,
	// unplaced) stack; the original userdata is repointed at the remainder.
	Item** itemPtr = getRawUserdata<Item>(L, 1);
	if (!itemPtr) {
		lua_pushnil(L);
		return 1;
	}
	Item* item = *itemPtr;
	if (!item || !item->isStackable()) {
		lua_pushnil(L);
		return 1;
	}
	// Clamp the requested count to what the stack actually holds.
	uint16_t count = std::min<uint16_t>(getNumber<uint16_t>(L, 2, 1), item->getItemCount());
	uint16_t diff = item->getItemCount() - count;
	Item* splitItem = item->clone();
	if (!splitItem) {
		lua_pushnil(L);
		return 1;
	}
	splitItem->setItemCount(count);
	ScriptEnvironment* env = getScriptEnv();
	uint32_t uid = env->addThing(item);
	// Shrink the original stack; transformItem may replace or remove it.
	Item* newItem = g_game.transformItem(item, item->getID(), diff);
	if (item->isRemoved()) {
		env->removeItemByUID(uid);
	}
	if (newItem && newItem != item) {
		// Keep the script uid pointing at the surviving item instance.
		env->insertItem(uid, newItem);
	}
	*itemPtr = newItem;
	// The split-off stack becomes a temp item on the virtual cylinder.
	splitItem->setParent(VirtualCylinder::virtualCylinder);
	env->addTempItem(splitItem);
	pushUserdata<Item>(L, splitItem);
	setItemMetatable(L, -1, splitItem);
	return 1;
}
int LuaScriptInterface::luaItemRemove(lua_State* L)
{
	// item:remove([count = -1])
	// Removes `count` units (-1 = the whole item); pushes success.
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	int32_t count = getNumber<int32_t>(L, 2, -1);
	pushBoolean(L, g_game.internalRemoveItem(item, count) == RETURNVALUE_NOERROR);
	return 1;
}
int LuaScriptInterface::luaItemGetUniqueId(lua_State* L)
{
	// item:getUniqueId()
	// Returns the item's unique id; assigns a script uid if it has none.
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	uint32_t uniqueId = item->getUniqueId();
	if (uniqueId == 0) {
		uniqueId = getScriptEnv()->addThing(item);
	}
	lua_pushnumber(L, uniqueId);
	return 1;
}
int LuaScriptInterface::luaItemGetActionId(lua_State* L)
{
	// item:getActionId()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, item->getActionId());
	return 1;
}
int LuaScriptInterface::luaItemSetActionId(lua_State* L)
{
	// item:setActionId(actionId)
	uint16_t actionId = getNumber<uint16_t>(L, 2);
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	item->setActionId(actionId);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaItemGetCount(lua_State* L)
{
	// item:getCount()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, item->getItemCount());
	return 1;
}
int LuaScriptInterface::luaItemGetCharges(lua_State* L)
{
	// item:getCharges()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, item->getCharges());
	return 1;
}
int LuaScriptInterface::luaItemGetFluidType(lua_State* L)
{
	// item:getFluidType()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, item->getFluidType());
	return 1;
}
int LuaScriptInterface::luaItemGetWeight(lua_State* L)
{
	// item:getWeight()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, item->getWeight());
	return 1;
}
int LuaScriptInterface::luaItemGetSubType(lua_State* L)
{
	// item:getSubType()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, item->getSubType());
	return 1;
}
int LuaScriptInterface::luaItemGetName(lua_State* L)
{
	// item:getName()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, item->getName());
	return 1;
}
int LuaScriptInterface::luaItemGetPluralName(lua_State* L)
{
	// item:getPluralName()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, item->getPluralName());
	return 1;
}
int LuaScriptInterface::luaItemGetArticle(lua_State* L)
{
	// item:getArticle()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, item->getArticle());
	return 1;
}
int LuaScriptInterface::luaItemGetPosition(lua_State* L)
{
	// item:getPosition()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushPosition(L, item->getPosition());
	return 1;
}
int LuaScriptInterface::luaItemGetTile(lua_State* L)
{
	// item:getTile()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	if (Tile* tile = item->getTile()) {
		pushUserdata<Tile>(L, tile);
		setMetatable(L, -1, "Tile");
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaItemHasAttribute(lua_State* L)
{
	// item:hasAttribute(key)
	// Key may be a numeric attribute id or an attribute name string.
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	itemAttrTypes attribute = ITEM_ATTRIBUTE_NONE;
	if (isNumber(L, 2)) {
		attribute = getNumber<itemAttrTypes>(L, 2);
	} else if (isString(L, 2)) {
		attribute = stringToItemAttribute(getString(L, 2));
	}
	pushBoolean(L, item->hasAttribute(attribute));
	return 1;
}
int LuaScriptInterface::luaItemGetAttribute(lua_State* L)
{
	// item:getAttribute(key)
	// Key may be a numeric attribute id or an attribute name string;
	// pushes a number, a string, or nil depending on the attribute type.
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	itemAttrTypes attribute = ITEM_ATTRIBUTE_NONE;
	if (isNumber(L, 2)) {
		attribute = getNumber<itemAttrTypes>(L, 2);
	} else if (isString(L, 2)) {
		attribute = stringToItemAttribute(getString(L, 2));
	}
	if (ItemAttributes::isIntAttrType(attribute)) {
		lua_pushnumber(L, item->getIntAttr(attribute));
	} else if (ItemAttributes::isStrAttrType(attribute)) {
		pushString(L, item->getStrAttr(attribute));
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaItemSetAttribute(lua_State* L)
{
	// item:setAttribute(key, value)
	// Sets an integer or string attribute; key may be numeric or a name.
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	itemAttrTypes attribute;
	if (isNumber(L, 2)) {
		attribute = getNumber<itemAttrTypes>(L, 2);
	} else if (isString(L, 2)) {
		attribute = stringToItemAttribute(getString(L, 2));
	} else {
		attribute = ITEM_ATTRIBUTE_NONE;
	}
	if (ItemAttributes::isIntAttrType(attribute)) {
		// The unique id is managed by the engine and must not be overwritten.
		if (attribute == ITEM_ATTRIBUTE_UNIQUEID) {
			reportErrorFunc(L, "Attempt to set protected key \"uid\"");
			pushBoolean(L, false);
			return 1;
		}
		item->setIntAttr(attribute, getNumber<int32_t>(L, 3));
		pushBoolean(L, true);
	} else if (ItemAttributes::isStrAttrType(attribute)) {
		item->setStrAttr(attribute, getString(L, 3));
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaItemRemoveAttribute(lua_State* L)
{
	// item:removeAttribute(key)
	// Removes the attribute and pushes true; the protected unique id
	// cannot be removed (pushes false and reports an error).
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	itemAttrTypes attribute = ITEM_ATTRIBUTE_NONE;
	if (isNumber(L, 2)) {
		attribute = getNumber<itemAttrTypes>(L, 2);
	} else if (isString(L, 2)) {
		attribute = stringToItemAttribute(getString(L, 2));
	}
	if (attribute == ITEM_ATTRIBUTE_UNIQUEID) {
		reportErrorFunc(L, "Attempt to erase protected key \"uid\"");
		pushBoolean(L, false);
		return 1;
	}
	item->removeAttribute(attribute);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaItemGetCustomAttribute(lua_State* L) {
	// item:getCustomAttribute(key)
	// Key may be a number or a string; pushes the stored value or nil.
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	const ItemAttributes::CustomAttribute* attr = nullptr;
	if (isNumber(L, 2)) {
		attr = item->getCustomAttribute(getNumber<int64_t>(L, 2));
	} else if (isString(L, 2)) {
		attr = item->getCustomAttribute(getString(L, 2));
	} else {
		lua_pushnil(L);
		return 1;
	}
	if (!attr) {
		lua_pushnil(L);
		return 1;
	}
	attr->pushToLua(L);
	return 1;
}
int LuaScriptInterface::luaItemSetCustomAttribute(lua_State* L) {
	// item:setCustomAttribute(key, value)
	// Stores a custom attribute. Numeric keys are stringified; values may
	// be number (stored as int64 when integral, double otherwise), string
	// or boolean.
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	std::string key;
	if (isNumber(L, 2)) {
		key = std::to_string(getNumber<int64_t>(L, 2));
	} else if (isString(L, 2)) {
		key = getString(L, 2);
	} else {
		lua_pushnil(L);
		return 1;
	}
	ItemAttributes::CustomAttribute val;
	if (isNumber(L, 3)) {
		double tmp = getNumber<double>(L, 3);
		// A fractional part means the number cannot be stored losslessly
		// as an integer.
		if (std::floor(tmp) < tmp) {
			val.set<double>(tmp);
		} else {
			val.set<int64_t>(tmp);
		}
	} else if (isString(L, 3)) {
		val.set<std::string>(getString(L, 3));
	} else if (isBoolean(L, 3)) {
		val.set<bool>(getBoolean(L, 3));
	} else {
		lua_pushnil(L);
		return 1;
	}
	item->setCustomAttribute(key, val);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaItemRemoveCustomAttribute(lua_State* L) {
	// item:removeCustomAttribute(key)
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	if (isNumber(L, 2)) {
		pushBoolean(L, item->removeCustomAttribute(getNumber<int64_t>(L, 2)));
		return 1;
	}
	if (isString(L, 2)) {
		pushBoolean(L, item->removeCustomAttribute(getString(L, 2)));
		return 1;
	}
	lua_pushnil(L);
	return 1;
}
int LuaScriptInterface::luaItemMoveTo(lua_State* L)
{
	// item:moveTo(position or cylinder[, flags])
	// Moves (or, for virtual items, adds) the item into the destination
	// cylinder; pushes success. The userdata is repointed if the engine
	// replaces the item during the move.
	Item** itemPtr = getRawUserdata<Item>(L, 1);
	if (!itemPtr) {
		lua_pushnil(L);
		return 1;
	}
	Item* item = *itemPtr;
	if (!item || item->isRemoved()) {
		lua_pushnil(L);
		return 1;
	}
	// Destination may be a Container/Player/Tile userdata or a position.
	Cylinder* toCylinder;
	if (isUserdata(L, 2)) {
		const LuaDataType type = getUserdataType(L, 2);
		switch (type) {
			case LuaData_Container:
				toCylinder = getUserdata<Container>(L, 2);
				break;
			case LuaData_Player:
				toCylinder = getUserdata<Player>(L, 2);
				break;
			case LuaData_Tile:
				toCylinder = getUserdata<Tile>(L, 2);
				break;
			default:
				toCylinder = nullptr;
				break;
		}
	} else {
		toCylinder = g_game.map.getTile(getPosition(L, 2));
	}
	if (!toCylinder) {
		lua_pushnil(L);
		return 1;
	}
	// Already where it should be: nothing to do.
	if (item->getParent() == toCylinder) {
		pushBoolean(L, true);
		return 1;
	}
	uint32_t flags = getNumber<uint32_t>(L, 3, FLAG_NOLIMIT | FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE | FLAG_IGNORENOTMOVEABLE);
	if (item->getParent() == VirtualCylinder::virtualCylinder) {
		// Virtual (unplaced) items are added rather than moved.
		pushBoolean(L, g_game.internalAddItem(toCylinder, item, INDEX_WHEREEVER, flags) == RETURNVALUE_NOERROR);
	} else {
		Item* moveItem = nullptr;
		ReturnValue ret = g_game.internalMoveItem(item->getParent(), toCylinder, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, flags);
		if (moveItem) {
			// The move produced a different item instance; track it.
			*itemPtr = moveItem;
		}
		pushBoolean(L, ret == RETURNVALUE_NOERROR);
	}
	return 1;
}
int LuaScriptInterface::luaItemTransform(lua_State* L)
{
	// item:transform(itemId[, count/subType = -1])
	// Transforms the item into another item id/sub-type in place; the
	// userdata is repointed at the resulting item. Always pushes true
	// once the arguments are valid.
	Item** itemPtr = getRawUserdata<Item>(L, 1);
	if (!itemPtr) {
		lua_pushnil(L);
		return 1;
	}
	Item*& item = *itemPtr;
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	// Accept either a numeric item id or an item name.
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			// Unknown item name.
			lua_pushnil(L);
			return 1;
		}
	}
	int32_t subType = getNumber<int32_t>(L, 3, -1);
	// No-op when the item already matches the requested id/sub-type.
	if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
		pushBoolean(L, true);
		return 1;
	}
	const ItemType& it = Item::items[itemId];
	if (it.stackable) {
		// Stackables are capped at one stack (100).
		subType = std::min<int32_t>(subType, 100);
	}
	ScriptEnvironment* env = getScriptEnv();
	uint32_t uid = env->addThing(item);
	// transformItem may replace or remove the original item instance.
	Item* newItem = g_game.transformItem(item, itemId, subType);
	if (item->isRemoved()) {
		env->removeItemByUID(uid);
	}
	if (newItem && newItem != item) {
		// Keep the script uid pointing at the surviving item instance.
		env->insertItem(uid, newItem);
	}
	item = newItem;
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaItemDecay(lua_State* L)
{
	// item:decay(decayId)
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}

	// An optional numeric argument overrides the id the item decays into.
	if (isNumber(L, 2)) {
		item->setDecayTo(getNumber<int32_t>(L, 2));
	}
	g_game.startDecay(item);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaItemGetDescription(lua_State* L)
{
	// item:getDescription(distance)
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, item->getDescription(getNumber<int32_t>(L, 2)));
	return 1;
}
int LuaScriptInterface::luaItemGetSpecialDescription(lua_State* L)
{
	// item:getSpecialDescription()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, item->getSpecialDescription());
	return 1;
}
int LuaScriptInterface::luaItemHasProperty(lua_State* L)
{
	// item:hasProperty(property)
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, item->hasProperty(getNumber<ITEMPROPERTY>(L, 2)));
	return 1;
}
int LuaScriptInterface::luaItemIsLoadedFromMap(lua_State* L)
{
	// item:isLoadedFromMap()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, item->isLoadedFromMap());
	return 1;
}
int LuaScriptInterface::luaItemSetStoreItem(lua_State* L)
{
	// item:setStoreItem(storeItem)
	// Marks/unmarks the item as a store item; the flag defaults to false
	// when no argument is given.
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}

	item->setStoreItem(getBoolean(L, 2, false));
	// Bug fix: the function returns 1 result, so a value must be pushed on
	// every path. Previously nothing was pushed here, leaving the Lua stack
	// unbalanced. Push true for consistency with the sibling setters.
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaItemIsStoreItem(lua_State* L)
{
	// item:isStoreItem()
	Item* item = getUserdata<Item>(L, 1);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, item->isStoreItem());
	return 1;
}
// Container
int LuaScriptInterface::luaContainerCreate(lua_State* L)
{
	// Container(uid)
	// Looks up a container by script-environment uid.
	Container* container = getScriptEnv()->getContainerByUID(getNumber<uint32_t>(L, 2));
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata(L, container);
	setMetatable(L, -1, "Container");
	return 1;
}
int LuaScriptInterface::luaContainerGetSize(lua_State* L)
{
	// container:getSize()
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, container->size());
	return 1;
}
int LuaScriptInterface::luaContainerGetCapacity(lua_State* L)
{
	// container:getCapacity()
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, container->capacity());
	return 1;
}
int LuaScriptInterface::luaContainerGetEmptySlots(lua_State* L)
{
	// container:getEmptySlots([recursive = false])
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}

	uint32_t freeSlots = container->capacity() - container->size();
	if (getBoolean(L, 2, false)) {
		// Recursive mode also counts the free slots of every nested container.
		for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
			Container* inner = (*it)->getContainer();
			if (inner) {
				freeSlots += inner->capacity() - inner->size();
			}
		}
	}
	lua_pushnumber(L, freeSlots);
	return 1;
}
int LuaScriptInterface::luaContainerGetItemHoldingCount(lua_State* L)
{
	// container:getItemHoldingCount()
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, container->getItemHoldingCount());
	return 1;
}
int LuaScriptInterface::luaContainerGetItem(lua_State* L)
{
	// container:getItem(index)
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}

	Item* item = container->getItemByIndex(getNumber<uint32_t>(L, 2));
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Item>(L, item);
	setItemMetatable(L, -1, item);
	return 1;
}
int LuaScriptInterface::luaContainerHasItem(lua_State* L)
{
	// container:hasItem(item)
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	Item* item = getUserdata<Item>(L, 2);
	pushBoolean(L, container->isHoldingItem(item));
	return 1;
}
int LuaScriptInterface::luaContainerAddItem(lua_State* L)
{
	// container:addItem(itemId[, count/subType = 1[, index = INDEX_WHEREEVER[, flags = 0]]])
	// Creates a new item and inserts it into the container; returns the
	// item userdata on success, nil otherwise.
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	// The item id may be numeric or an item name.
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			lua_pushnil(L);
			return 1;
		}
	}
	uint32_t count = getNumber<uint32_t>(L, 3, 1);
	const ItemType& it = Item::items[itemId];
	if (it.stackable) {
		// Stackables are capped at 100 per item.
		count = std::min<uint16_t>(count, 100);
	}
	Item* item = Item::CreateItem(itemId, count);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	int32_t index = getNumber<int32_t>(L, 4, INDEX_WHEREEVER);
	uint32_t flags = getNumber<uint32_t>(L, 5, 0);
	ReturnValue ret = g_game.internalAddItem(container, item, index, flags);
	if (ret == RETURNVALUE_NOERROR) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
	} else {
		// Insertion failed: the item was never adopted by the world, so it
		// must be freed here to avoid a leak.
		delete item;
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaContainerAddItemEx(lua_State* L)
{
	// container:addItemEx(item[, index = INDEX_WHEREEVER[, flags = 0]])
	// Inserts an already-created item; pushes the ReturnValue code.
	Item* item = getUserdata<Item>(L, 2);
	if (!item) {
		lua_pushnil(L);
		return 1;
	}
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	// Only items still parked on the virtual cylinder (script-created,
	// not yet placed in the world) may be added this way.
	if (item->getParent() != VirtualCylinder::virtualCylinder) {
		reportErrorFunc(L, "Item already has a parent");
		lua_pushnil(L);
		return 1;
	}
	int32_t index = getNumber<int32_t>(L, 3, INDEX_WHEREEVER);
	uint32_t flags = getNumber<uint32_t>(L, 4, 0);
	ReturnValue ret = g_game.internalAddItem(container, item, index, flags);
	if (ret == RETURNVALUE_NOERROR) {
		// Ownership has transferred to the game world; stop tracking the
		// item as a temporary so the script env does not delete it.
		ScriptEnvironment::removeTempItem(item);
	}
	lua_pushnumber(L, ret);
	return 1;
}
int LuaScriptInterface::luaContainerGetCorpseOwner(lua_State* L)
{
	// container:getCorpseOwner()
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, container->getCorpseOwner());
	return 1;
}
int LuaScriptInterface::luaContainerGetItemCountById(lua_State* L)
{
	// container:getItemCountById(itemId[, subType = -1])
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}

	// Accept a numeric id or an item name; unknown names yield nil.
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			lua_pushnil(L);
			return 1;
		}
	}
	lua_pushnumber(L, container->getItemTypeCount(itemId, getNumber<int32_t>(L, 3, -1)));
	return 1;
}
int LuaScriptInterface::luaContainerGetContentDescription(lua_State* L)
{
	// container:getContentDescription()
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, container->getContentDescription());
	return 1;
}
int LuaScriptInterface::luaContainerGetItems(lua_State* L)
{
	// container:getItems([recursive = false])
	// Returns an array table of item userdata; recursive mode descends
	// into nested containers.
	Container* container = getUserdata<Container>(L, 1);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}

	std::vector<Item*> items = container->getItems(getBoolean(L, 2, false));
	lua_createtable(L, items.size(), 0);

	int index = 0;
	for (Item* item : items) {
		pushUserdata(L, item);
		setItemMetatable(L, -1, item);
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
// Teleport
int LuaScriptInterface::luaTeleportCreate(lua_State* L)
{
	// Teleport(uid)
	// Looks up an item by uid and wraps it only when it is a teleport.
	Item* item = getScriptEnv()->getItemByUID(getNumber<uint32_t>(L, 2));
	if (!item || !item->getTeleport()) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata(L, item);
	setMetatable(L, -1, "Teleport");
	return 1;
}
int LuaScriptInterface::luaTeleportGetDestination(lua_State* L)
{
	// teleport:getDestination()
	Teleport* teleport = getUserdata<Teleport>(L, 1);
	if (!teleport) {
		lua_pushnil(L);
		return 1;
	}
	pushPosition(L, teleport->getDestPos());
	return 1;
}
int LuaScriptInterface::luaTeleportSetDestination(lua_State* L)
{
	// teleport:setDestination(position)
	Teleport* teleport = getUserdata<Teleport>(L, 1);
	if (!teleport) {
		lua_pushnil(L);
		return 1;
	}
	teleport->setDestPos(getPosition(L, 2));
	pushBoolean(L, true);
	return 1;
}
// Creature
int LuaScriptInterface::luaCreatureCreate(lua_State* L)
{
	// Creature(id or name or userdata)
	Creature* creature = nullptr;
	if (isNumber(L, 2)) {
		creature = g_game.getCreatureByID(getNumber<uint32_t>(L, 2));
	} else if (isString(L, 2)) {
		creature = g_game.getCreatureByName(getString(L, 2));
	} else if (isUserdata(L, 2)) {
		// Only creature-derived userdata types are accepted.
		LuaDataType type = getUserdataType(L, 2);
		if (type == LuaData_Player || type == LuaData_Monster || type == LuaData_Npc) {
			creature = getUserdata<Creature>(L, 2);
		}
	}

	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Creature>(L, creature);
	setCreatureMetatable(L, -1, creature);
	return 1;
}
int LuaScriptInterface::luaCreatureGetEvents(lua_State* L)
{
	// creature:getEvents(type)
	// Returns an array table of registered creature-event names of the
	// given type.
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	const auto& eventList = creature->getCreatureEvents(getNumber<CreatureEventType_t>(L, 2));
	lua_createtable(L, eventList.size(), 0);

	int index = 0;
	for (CreatureEvent* event : eventList) {
		pushString(L, event->getName());
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
int LuaScriptInterface::luaCreatureRegisterEvent(lua_State* L)
{
	// creature:registerEvent(name)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, creature->registerCreatureEvent(getString(L, 2)));
	return 1;
}
int LuaScriptInterface::luaCreatureUnregisterEvent(lua_State* L)
{
	// creature:unregisterEvent(name)
	const std::string& name = getString(L, 2);
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, creature->unregisterCreatureEvent(name));
	return 1;
}
int LuaScriptInterface::luaCreatureIsRemoved(lua_State* L)
{
	// creature:isRemoved()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, creature->isRemoved());
	return 1;
}
int LuaScriptInterface::luaCreatureIsCreature(lua_State* L)
{
	// creature:isCreature()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	pushBoolean(L, creature != nullptr);
	return 1;
}
int LuaScriptInterface::luaCreatureIsInGhostMode(lua_State* L)
{
	// creature:isInGhostMode()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, creature->isInGhostMode());
	return 1;
}
int LuaScriptInterface::luaCreatureIsHealthHidden(lua_State* L)
{
	// creature:isHealthHidden()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, creature->isHealthHidden());
	return 1;
}
int LuaScriptInterface::luaCreatureIsMovementBlocked(lua_State* L)
{
	// creature:isMovementBlocked()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, creature->isMovementBlocked());
	return 1;
}
int LuaScriptInterface::luaCreatureCanSee(lua_State* L)
{
	// creature:canSee(position)
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, creature->canSee(getPosition(L, 2)));
	return 1;
}
int LuaScriptInterface::luaCreatureCanSeeCreature(lua_State* L)
{
	// creature:canSeeCreature(creature)
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	const Creature* otherCreature = getCreature(L, 2);
	if (!otherCreature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, creature->canSeeCreature(otherCreature));
	return 1;
}
int LuaScriptInterface::luaCreatureCanSeeGhostMode(lua_State* L)
{
	// creature:canSeeGhostMode(creature)
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	const Creature* otherCreature = getCreature(L, 2);
	if (!otherCreature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, creature->canSeeGhostMode(otherCreature));
	return 1;
}
int LuaScriptInterface::luaCreatureCanSeeInvisibility(lua_State* L)
{
	// creature:canSeeInvisibility()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, creature->canSeeInvisibility());
	return 1;
}
int LuaScriptInterface::luaCreatureGetParent(lua_State* L)
{
	// creature:getParent()
	Creature* creature = getUserdata<Creature>(L, 1);
	Cylinder* parent = creature ? creature->getParent() : nullptr;
	if (!parent) {
		lua_pushnil(L);
		return 1;
	}
	pushCylinder(L, parent);
	return 1;
}
int LuaScriptInterface::luaCreatureGetId(lua_State* L)
{
	// creature:getId()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, creature->getID());
	return 1;
}
int LuaScriptInterface::luaCreatureGetName(lua_State* L)
{
	// creature:getName()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, creature->getName());
	return 1;
}
int LuaScriptInterface::luaCreatureGetTarget(lua_State* L)
{
	// creature:getTarget()
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	Creature* target = creature->getAttackedCreature();
	if (!target) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Creature>(L, target);
	setCreatureMetatable(L, -1, target);
	return 1;
}
int LuaScriptInterface::luaCreatureSetTarget(lua_State* L)
{
	// creature:setTarget(target)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, creature->setAttackedCreature(getCreature(L, 2)));
	return 1;
}
int LuaScriptInterface::luaCreatureGetFollowCreature(lua_State* L)
{
	// creature:getFollowCreature()
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	Creature* followed = creature->getFollowCreature();
	if (!followed) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Creature>(L, followed);
	setCreatureMetatable(L, -1, followed);
	return 1;
}
int LuaScriptInterface::luaCreatureSetFollowCreature(lua_State* L)
{
	// creature:setFollowCreature(followedCreature)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, creature->setFollowCreature(getCreature(L, 2)));
	return 1;
}
int LuaScriptInterface::luaCreatureGetMaster(lua_State* L)
{
	// creature:getMaster()
	Creature* creature = getUserdata<Creature>(L, 1);
	Creature* master = creature ? creature->getMaster() : nullptr;
	if (!master) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Creature>(L, master);
	setCreatureMetatable(L, -1, master);
	return 1;
}
int LuaScriptInterface::luaCreatureSetMaster(lua_State* L)
{
	// creature:setMaster(master)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	// Push the result first; the creature-type update runs regardless of
	// whether setting the master succeeded.
	pushBoolean(L, creature->setMaster(getCreature(L, 2)));
	g_game.updateCreatureType(creature);
	return 1;
}
int LuaScriptInterface::luaCreatureGetLight(lua_State* L)
{
	// creature:getLight()
	// Returns two values: light level and light color.
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	LightInfo info = creature->getCreatureLight();
	lua_pushnumber(L, info.level);
	lua_pushnumber(L, info.color);
	return 2;
}
int LuaScriptInterface::luaCreatureSetLight(lua_State* L)
{
	// creature:setLight(color, level)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	LightInfo info;
	info.color = getNumber<uint8_t>(L, 2);
	info.level = getNumber<uint8_t>(L, 3);
	creature->setCreatureLight(info);
	// Broadcast the new light to spectators.
	g_game.changeLight(creature);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureGetSpeed(lua_State* L)
{
	// creature:getSpeed()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, creature->getSpeed());
	return 1;
}
int LuaScriptInterface::luaCreatureGetBaseSpeed(lua_State* L)
{
	// creature:getBaseSpeed()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, creature->getBaseSpeed());
	return 1;
}
int LuaScriptInterface::luaCreatureChangeSpeed(lua_State* L)
{
	// creature:changeSpeed(delta)
	Creature* creature = getCreature(L, 1);
	if (!creature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}

	g_game.changeSpeed(creature, getNumber<int32_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureSetDropLoot(lua_State* L)
{
	// creature:setDropLoot(doDrop)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	creature->setDropLoot(getBoolean(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureSetSkillLoss(lua_State* L)
{
	// creature:setSkillLoss(skillLoss)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	creature->setSkillLoss(getBoolean(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureGetPosition(lua_State* L)
{
	// creature:getPosition()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushPosition(L, creature->getPosition());
	return 1;
}
int LuaScriptInterface::luaCreatureGetTile(lua_State* L)
{
	// creature:getTile()
	Creature* creature = getUserdata<Creature>(L, 1);
	Tile* tile = creature ? creature->getTile() : nullptr;
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Tile>(L, tile);
	setMetatable(L, -1, "Tile");
	return 1;
}
int LuaScriptInterface::luaCreatureGetDirection(lua_State* L)
{
	// creature:getDirection()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, creature->getDirection());
	return 1;
}
int LuaScriptInterface::luaCreatureSetDirection(lua_State* L)
{
	// creature:setDirection(direction)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, g_game.internalCreatureTurn(creature, getNumber<Direction>(L, 2)));
	return 1;
}
int LuaScriptInterface::luaCreatureGetHealth(lua_State* L)
{
	// creature:getHealth()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, creature->getHealth());
	return 1;
}
int LuaScriptInterface::luaCreatureSetHealth(lua_State* L)
{
	// creature:setHealth(health)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	// Clamp to the current maximum and broadcast the new health value.
	creature->health = std::min<int32_t>(getNumber<uint32_t>(L, 2), creature->healthMax);
	g_game.addCreatureHealth(creature);

	// Players additionally need a stats refresh on their client.
	if (Player* player = creature->getPlayer()) {
		player->sendStats();
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureAddHealth(lua_State* L)
{
	// creature:addHealth(healthChange)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	CombatDamage damage;
	damage.primary.value = getNumber<int32_t>(L, 2);
	// Positive values heal, negative values deal untyped damage.
	damage.primary.type = damage.primary.value >= 0 ? COMBAT_HEALING : COMBAT_UNDEFINEDDAMAGE;
	pushBoolean(L, g_game.combatChangeHealth(nullptr, creature, damage));
	return 1;
}
int LuaScriptInterface::luaCreatureGetMaxHealth(lua_State* L)
{
	// creature:getMaxHealth()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, creature->getMaxHealth());
	return 1;
}
int LuaScriptInterface::luaCreatureSetMaxHealth(lua_State* L)
{
	// creature:setMaxHealth(maxHealth)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	// Re-clamp current health to the new maximum, then broadcast.
	creature->healthMax = getNumber<uint32_t>(L, 2);
	creature->health = std::min<int32_t>(creature->health, creature->healthMax);
	g_game.addCreatureHealth(creature);

	if (Player* player = creature->getPlayer()) {
		player->sendStats();
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureSetHiddenHealth(lua_State* L)
{
	// creature:setHiddenHealth(hide)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	creature->setHiddenHealth(getBoolean(L, 2));
	g_game.addCreatureHealth(creature);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureSetMovementBlocked(lua_State* L)
{
	// creature:setMovementBlocked(state)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	creature->setMovementBlocked(getBoolean(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureGetSkull(lua_State* L)
{
	// creature:getSkull()
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, creature->getSkull());
	return 1;
}
int LuaScriptInterface::luaCreatureSetSkull(lua_State* L)
{
	// creature:setSkull(skull)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	creature->setSkull(getNumber<Skulls_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureGetOutfit(lua_State* L)
{
	// creature:getOutfit()
	const Creature* creature = getUserdata<const Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushOutfit(L, creature->getCurrentOutfit());
	return 1;
}
int LuaScriptInterface::luaCreatureSetOutfit(lua_State* L)
{
	// creature:setOutfit(outfit)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	creature->defaultOutfit = getOutfit(L, 2);
	g_game.internalCreatureChangeOutfit(creature, creature->defaultOutfit);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureGetCondition(lua_State* L)
{
	// creature:getCondition(conditionType[, conditionId = CONDITIONID_COMBAT[, subId = 0]])
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
	ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
	uint32_t subId = getNumber<uint32_t>(L, 4, 0);

	Condition* condition = creature->getCondition(conditionType, conditionId, subId);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}
	// Weak metatable: the condition stays owned by the creature, not Lua.
	pushUserdata<Condition>(L, condition);
	setWeakMetatable(L, -1, "Condition");
	return 1;
}
int LuaScriptInterface::luaCreatureAddCondition(lua_State* L)
{
	// creature:addCondition(condition[, force = false])
	Creature* creature = getUserdata<Creature>(L, 1);
	Condition* condition = getUserdata<Condition>(L, 2);
	if (!creature || !condition) {
		lua_pushnil(L);
		return 1;
	}

	// A clone is added so the Lua-owned condition object stays valid.
	pushBoolean(L, creature->addCondition(condition->clone(), getBoolean(L, 3, false)));
	return 1;
}
int LuaScriptInterface::luaCreatureRemoveCondition(lua_State* L)
{
	// creature:removeCondition(conditionType[, conditionId = CONDITIONID_COMBAT[, subId = 0[, force = false]]])
	// creature:removeCondition(condition[, force = false])
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	// Two call forms: either a condition userdata at arg 2, or a
	// type/id/subId lookup. The position of `force` differs between them.
	Condition* condition = nullptr;
	bool force = false;
	if (isUserdata(L, 2)) {
		condition = getUserdata<Condition>(L, 2);
		force = getBoolean(L, 3, false);
	} else {
		ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
		ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
		uint32_t subId = getNumber<uint32_t>(L, 4, 0);
		condition = creature->getCondition(conditionType, conditionId, subId);
		force = getBoolean(L, 5, false);
	}
	if (condition) {
		creature->removeCondition(condition, force);
		pushBoolean(L, true);
	} else {
		// No matching condition found (or nil userdata given).
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaCreatureHasCondition(lua_State* L)
{
	// creature:hasCondition(conditionType[, subId = 0])
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	pushBoolean(L, creature->hasCondition(getNumber<ConditionType_t>(L, 2), getNumber<uint32_t>(L, 3, 0)));
	return 1;
}
int LuaScriptInterface::luaCreatureIsImmune(lua_State* L)
{
	// creature:isImmune(condition or conditionType)
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	// Accept either a numeric condition type or a condition userdata.
	if (isNumber(L, 2)) {
		pushBoolean(L, creature->isImmune(getNumber<ConditionType_t>(L, 2)));
		return 1;
	}

	Condition* condition = getUserdata<Condition>(L, 2);
	if (condition) {
		pushBoolean(L, creature->isImmune(condition->getType()));
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaCreatureRemove(lua_State* L)
{
	// creature:remove()
	// Removes the creature from the game; players are kicked instead of
	// being removed directly.
	Creature** creaturePtr = getRawUserdata<Creature>(L, 1);
	if (!creaturePtr) {
		lua_pushnil(L);
		return 1;
	}
	Creature* creature = *creaturePtr;
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	Player* player = creature->getPlayer();
	if (player) {
		player->kickPlayer(true);
	} else {
		g_game.removeCreature(creature);
	}
	// Null the raw userdata so the Lua reference cannot touch the
	// removed creature afterwards.
	*creaturePtr = nullptr;
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureTeleportTo(lua_State* L)
{
	// creature:teleportTo(position[, pushMovement = false])
	// Teleports the creature; with pushMovement it also turns the creature
	// to face the direction of travel.
	bool pushMovement = getBoolean(L, 3, false);
	const Position& position = getPosition(L, 2);
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	const Position oldPosition = creature->getPosition();
	if (g_game.internalTeleport(creature, position, pushMovement) != RETURNVALUE_NOERROR) {
		pushBoolean(L, false);
		return 1;
	}
	if (pushMovement) {
		// Face north/south only when x is unchanged; otherwise east/west
		// wins regardless of any y displacement.
		if (oldPosition.x == position.x) {
			if (oldPosition.y < position.y) {
				g_game.internalCreatureTurn(creature, DIRECTION_SOUTH);
			} else {
				g_game.internalCreatureTurn(creature, DIRECTION_NORTH);
			}
		} else if (oldPosition.x > position.x) {
			g_game.internalCreatureTurn(creature, DIRECTION_WEST);
		} else if (oldPosition.x < position.x) {
			g_game.internalCreatureTurn(creature, DIRECTION_EAST);
		}
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureSay(lua_State* L)
{
	// creature:say(text[, type = TALKTYPE_MONSTER_SAY[, ghost = false[, target = nullptr[, position]]]])
	// Makes the creature speak; optionally restricted to a target spectator
	// and/or emitted from a custom position.
	int parameters = lua_gettop(L);
	Position position;
	if (parameters >= 6) {
		position = getPosition(L, 6);
		// A custom position must have non-zero x and y.
		if (!position.x || !position.y) {
			reportErrorFunc(L, "Invalid position specified.");
			pushBoolean(L, false);
			return 1;
		}
	}
	Creature* target = nullptr;
	if (parameters >= 5) {
		target = getCreature(L, 5);
	}
	bool ghost = getBoolean(L, 4, false);
	SpeakClasses type = getNumber<SpeakClasses>(L, 3, TALKTYPE_MONSTER_SAY);
	const std::string& text = getString(L, 2);
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	// With a target, only that spectator is put in the list; an empty list
	// presumably lets the game compute spectators itself — verify against
	// Game::internalCreatureSay.
	SpectatorVec spectators;
	if (target) {
		spectators.emplace_back(target);
	}
	// position.x != 0 marks that a custom source position was supplied.
	if (position.x != 0) {
		pushBoolean(L, g_game.internalCreatureSay(creature, type, text, ghost, &spectators, &position));
	} else {
		pushBoolean(L, g_game.internalCreatureSay(creature, type, text, ghost, &spectators));
	}
	return 1;
}
int LuaScriptInterface::luaCreatureGetDamageMap(lua_State* L)
{
	// creature:getDamageMap()
	// Returns a table keyed by attacker id with {total, ticks} entries.
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	lua_createtable(L, creature->damageMap.size(), 0);
	for (const auto& entry : creature->damageMap) {
		lua_createtable(L, 0, 2);
		setField(L, "total", entry.second.total);
		setField(L, "ticks", entry.second.ticks);
		lua_rawseti(L, -2, entry.first);
	}
	return 1;
}
int LuaScriptInterface::luaCreatureGetSummons(lua_State* L)
{
	// creature:getSummons()
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	lua_createtable(L, creature->getSummonCount(), 0);

	int index = 0;
	for (Creature* summon : creature->getSummons()) {
		pushUserdata<Creature>(L, summon);
		setCreatureMetatable(L, -1, summon);
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
int LuaScriptInterface::luaCreatureGetDescription(lua_State* L)
{
	// creature:getDescription(distance)
	int32_t distance = getNumber<int32_t>(L, 2);
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, creature->getDescription(distance));
	return 1;
}
int LuaScriptInterface::luaCreatureGetPathTo(lua_State* L)
{
	// creature:getPathTo(pos[, minTargetDist = 0[, maxTargetDist = 1[, fullPathSearch = true[, clearSight = true[, maxSearchDist = 0]]]]])
	// Computes a walking path to the position and returns it as a table of
	// direction values, or false when no path exists.
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	const Position& position = getPosition(L, 2);
	FindPathParams fpp;
	fpp.minTargetDist = getNumber<int32_t>(L, 3, 0);
	fpp.maxTargetDist = getNumber<int32_t>(L, 4, 1);
	// These defaults reuse whatever FindPathParams initializes them to.
	fpp.fullPathSearch = getBoolean(L, 5, fpp.fullPathSearch);
	fpp.clearSight = getBoolean(L, 6, fpp.clearSight);
	fpp.maxSearchDist = getNumber<int32_t>(L, 7, fpp.maxSearchDist);
	std::vector<Direction> dirList;
	if (creature->getPathTo(position, dirList, fpp)) {
		lua_newtable(L);
		int index = 0;
		// Iterate in reverse so the returned table runs from the first
		// step to the last.
		for (auto it = dirList.rbegin(); it != dirList.rend(); ++it) {
			lua_pushnumber(L, *it);
			lua_rawseti(L, -2, ++index);
		}
	} else {
		pushBoolean(L, false);
	}
	return 1;
}
int LuaScriptInterface::luaCreatureMove(lua_State* L)
{
	// creature:move(direction)
	// creature:move(tile[, flags = 0])
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}

	if (isNumber(L, 2)) {
		// One step in the given direction, ignoring movement limits.
		Direction direction = getNumber<Direction>(L, 2);
		if (direction > DIRECTION_LAST) {
			lua_pushnil(L);
			return 1;
		}
		lua_pushnumber(L, g_game.internalMoveCreature(creature, direction, FLAG_NOLIMIT));
		return 1;
	}

	Tile* tile = getUserdata<Tile>(L, 2);
	if (!tile) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, g_game.internalMoveCreature(*creature, *tile, getNumber<uint32_t>(L, 3)));
	return 1;
}
int LuaScriptInterface::luaCreatureGetZone(lua_State* L)
{
	// creature:getZone()
	Creature* creature = getUserdata<Creature>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, creature->getZone());
	return 1;
}
// Player
int LuaScriptInterface::luaPlayerCreate(lua_State* L)
{
	// Player(id or guid or name or userdata)
	Player* player;
	if (isNumber(L, 2)) {
		uint32_t id = getNumber<uint32_t>(L, 2);
		// Values in the runtime auto-id range are online player ids;
		// anything else is treated as a database GUID.
		if (id >= 0x10000000 && id <= Player::playerAutoID) {
			player = g_game.getPlayerByID(id);
		} else {
			player = g_game.getPlayerByGUID(id);
		}
	} else if (isString(L, 2)) {
		// Wildcard name lookup writes the result into `player`.
		ReturnValue ret = g_game.getPlayerByNameWildcard(getString(L, 2), player);
		if (ret != RETURNVALUE_NOERROR) {
			// On failure, also return the error code as a second value.
			lua_pushnil(L);
			lua_pushnumber(L, ret);
			return 2;
		}
	} else if (isUserdata(L, 2)) {
		if (getUserdataType(L, 2) != LuaData_Player) {
			lua_pushnil(L);
			return 1;
		}
		player = getUserdata<Player>(L, 2);
	} else {
		player = nullptr;
	}
	if (player) {
		pushUserdata<Player>(L, player);
		setMetatable(L, -1, "Player");
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaPlayerIsPlayer(lua_State* L)
{
	// player:isPlayer()
	const Player* player = getUserdata<const Player>(L, 1);
	pushBoolean(L, player != nullptr);
	return 1;
}
int LuaScriptInterface::luaPlayerGetGuid(lua_State* L)
{
// player:getGuid()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getGUID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetIp(lua_State* L)
{
// player:getIp()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getIP());
} else {
lua_pushnil(L);
}
return 1;
}
// Simple account/login accessors. Each pushes the value, or nil when
// the userdata is not a valid Player.
int LuaScriptInterface::luaPlayerGetAccountId(lua_State* L)
{
	// player:getAccountId()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getAccount());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetLastLoginSaved(lua_State* L)
{
	// player:getLastLoginSaved()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getLastLoginSaved());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetLastLogout(lua_State* L)
{
	// player:getLastLogout()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getLastLogout());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetAccountType(lua_State* L)
{
	// player:getAccountType()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getAccountType());
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Sets the player's account type and persists it to the account record.
// NOTE(review): the value is not range-checked here — presumably callers
// pass ACCOUNT_TYPE_* constants; confirm before relying on validation.
int LuaScriptInterface::luaPlayerSetAccountType(lua_State* L)
{
	// player:setAccountType(accountType)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		player->accountType = getNumber<AccountType_t>(L, 2);
		// Persist immediately so the change survives relog.
		IOLoginData::setAccountType(player->getAccount(), player->accountType);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Capacity accessors. Values are pushed as numbers; nil on bad userdata.
int LuaScriptInterface::luaPlayerGetCapacity(lua_State* L)
{
	// player:getCapacity()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getCapacity());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerSetCapacity(lua_State* L)
{
	// player:setCapacity(capacity)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		player->capacity = getNumber<uint32_t>(L, 2);
		// Push the updated stats to the client right away.
		player->sendStats();
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetFreeCapacity(lua_State* L)
{
	// player:getFreeCapacity()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getFreeCapacity());
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Fetches (optionally creating) a depot chest by id. Pushes the chest as
// an Item userdata, false when the chest does not exist, nil on bad player.
int LuaScriptInterface::luaPlayerGetDepotChest(lua_State* L)
{
	// player:getDepotChest(depotId[, autoCreate = false])
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	uint32_t depotId = getNumber<uint32_t>(L, 2);
	bool autoCreate = getBoolean(L, 3, false);
	DepotChest* depotChest = player->getDepotChest(depotId, autoCreate);
	if (depotChest) {
		player->setLastDepotId(depotId); // FIXME: workaround for #2251
		pushUserdata<Item>(L, depotChest);
		setItemMetatable(L, -1, depotChest);
	} else {
		pushBoolean(L, false);
	}
	return 1;
}
// Pushes the player's inbox as an Item userdata; false when the inbox is
// unavailable, nil for invalid player userdata.
int LuaScriptInterface::luaPlayerGetInbox(lua_State* L)
{
	// player:getInbox()
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	Inbox* inbox = player->getInbox();
	if (!inbox) {
		// A missing inbox yields false (not nil), matching getDepotChest.
		pushBoolean(L, false);
		return 1;
	}

	pushUserdata<Item>(L, inbox);
	setItemMetatable(L, -1, inbox);
	return 1;
}
// Skull/death accessors.
int LuaScriptInterface::luaPlayerGetSkullTime(lua_State* L)
{
	// player:getSkullTime()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getSkullTicks());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerSetSkullTime(lua_State* L)
{
	// player:setSkullTime(skullTime)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		player->setSkullTicks(getNumber<int64_t>(L, 2));
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetDeathPenalty(lua_State* L)
{
	// player:getDeathPenalty()
	// getLostPercent() returns a fraction; scale to a percentage for Lua.
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getLostPercent() * 100);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Experience accessors and mutators.
int LuaScriptInterface::luaPlayerGetExperience(lua_State* L)
{
	// player:getExperience()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getExperience());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerAddExperience(lua_State* L)
{
	// player:addExperience(experience[, sendText = false])
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		uint64_t experience = getNumber<uint64_t>(L, 2);
		bool sendText = getBoolean(L, 3, false);
		// nullptr source: the gain is not attributed to any creature.
		player->addExperience(nullptr, experience, sendText);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerRemoveExperience(lua_State* L)
{
	// player:removeExperience(experience[, sendText = false])
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		uint64_t experience = getNumber<uint64_t>(L, 2);
		bool sendText = getBoolean(L, 3, false);
		player->removeExperience(experience, sendText);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Level / magic level / mana read-only accessors.
int LuaScriptInterface::luaPlayerGetLevel(lua_State* L)
{
	// player:getLevel()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getLevel());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetMagicLevel(lua_State* L)
{
	// player:getMagicLevel()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getMagicLevel());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetBaseMagicLevel(lua_State* L)
{
	// player:getBaseMagicLevel()
	// Base level: excludes temporary buffs/conditions.
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getBaseMagicLevel());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetMana(lua_State* L)
{
	// player:getMana()
	const Player* player = getUserdata<const Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getMana());
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Changes the player's mana. Silent losses are applied directly; any gain,
// or a loss with animation, is routed through the combat system so the
// usual visual feedback is produced.
int LuaScriptInterface::luaPlayerAddMana(lua_State* L)
{
	// player:addMana(manaChange[, animationOnLoss = false])
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	int32_t manaChange = getNumber<int32_t>(L, 2);
	bool animationOnLoss = getBoolean(L, 3, false);
	if (!animationOnLoss && manaChange < 0) {
		// Quiet path: adjust mana without combat-side effects.
		player->changeMana(manaChange);
	} else {
		CombatDamage damage;
		damage.primary.value = manaChange;
		damage.origin = ORIGIN_NONE;
		g_game.combatChangeMana(nullptr, player, damage);
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaPlayerGetMaxMana(lua_State* L)
{
	// player:getMaxMana()
	const Player* player = getUserdata<const Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getMaxMana());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

// Sets the mana cap and clamps current mana to it.
// NOTE(review): uses getPlayer() while sibling bindings use
// getUserdata<Player>() — behavior appears equivalent; confirm intent.
int LuaScriptInterface::luaPlayerSetMaxMana(lua_State* L)
{
	// player:setMaxMana(maxMana)
	Player* player = getPlayer(L, 1);
	if (player) {
		player->manaMax = getNumber<int32_t>(L, 2);
		// Never leave current mana above the new maximum.
		player->mana = std::min<int32_t>(player->mana, player->manaMax);
		player->sendStats();
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Mana-spent and base max health/mana accessors.
int LuaScriptInterface::luaPlayerGetManaSpent(lua_State* L)
{
	// player:getManaSpent()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getSpentMana());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerAddManaSpent(lua_State* L)
{
	// player:addManaSpent(amount)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		player->addManaSpent(getNumber<uint64_t>(L, 2));
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerRemoveManaSpent(lua_State* L)
{
	// player:removeManaSpent(amount[, notify = true])
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		player->removeManaSpent(getNumber<uint64_t>(L, 2), getBoolean(L, 3, true));
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetBaseMaxHealth(lua_State* L)
{
	// player:getBaseMaxHealth()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->healthMax);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetBaseMaxMana(lua_State* L)
{
	// player:getBaseMaxMana()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->manaMax);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Skill read accessors. All of them bounds-check skillType against
// SKILL_LAST before indexing the skills array; out-of-range pushes nil.
int LuaScriptInterface::luaPlayerGetSkillLevel(lua_State* L)
{
	// player:getSkillLevel(skillType)
	skills_t skillType = getNumber<skills_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (player && skillType <= SKILL_LAST) {
		lua_pushnumber(L, player->skills[skillType].level);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetEffectiveSkillLevel(lua_State* L)
{
	// player:getEffectiveSkillLevel(skillType)
	// Effective level: includes buffs/conditions, unlike the raw level above.
	skills_t skillType = getNumber<skills_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (player && skillType <= SKILL_LAST) {
		lua_pushnumber(L, player->getSkillLevel(skillType));
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetSkillPercent(lua_State* L)
{
	// player:getSkillPercent(skillType)
	skills_t skillType = getNumber<skills_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (player && skillType <= SKILL_LAST) {
		lua_pushnumber(L, player->skills[skillType].percent);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetSkillTries(lua_State* L)
{
	// player:getSkillTries(skillType)
	skills_t skillType = getNumber<skills_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (player && skillType <= SKILL_LAST) {
		lua_pushnumber(L, player->skills[skillType].tries);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Adds skill tries (advance points) to one of the player's skills.
// Pushes true on success, nil for an invalid player or skill type.
int LuaScriptInterface::luaPlayerAddSkillTries(lua_State* L)
{
	// player:addSkillTries(skillType, tries)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	// Bounds-check the skill index before mutating, matching the
	// validation done by the skill getters above; an out-of-range value
	// from Lua would otherwise be used to index the skills array.
	skills_t skillType = getNumber<skills_t>(L, 2);
	if (skillType > SKILL_LAST) {
		lua_pushnil(L);
		return 1;
	}

	uint64_t tries = getNumber<uint64_t>(L, 3);
	player->addSkillAdvance(skillType, tries);
	pushBoolean(L, true);
	return 1;
}
// Removes skill tries from one of the player's skills, optionally
// notifying the client. Pushes true on success, nil for an invalid
// player or skill type.
int LuaScriptInterface::luaPlayerRemoveSkillTries(lua_State* L)
{
	// player:removeSkillTries(skillType, tries[, notify = true])
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	// Bounds-check the skill index before mutating, matching the
	// validation done by the skill getters above; an out-of-range value
	// from Lua would otherwise be used to index the skills array.
	skills_t skillType = getNumber<skills_t>(L, 2);
	if (skillType > SKILL_LAST) {
		lua_pushnil(L);
		return 1;
	}

	uint64_t tries = getNumber<uint64_t>(L, 3);
	player->removeSkillTries(skillType, tries, getBoolean(L, 4, true));
	pushBoolean(L, true);
	return 1;
}
// Special-skill (e.g. life/mana leech style bonuses) accessors; both
// validate the skill index against SPECIALSKILL_LAST.
int LuaScriptInterface::luaPlayerGetSpecialSkill(lua_State* L)
{
	// player:getSpecialSkill(specialSkillType)
	SpecialSkills_t specialSkillType = getNumber<SpecialSkills_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (player && specialSkillType <= SPECIALSKILL_LAST) {
		lua_pushnumber(L, player->getSpecialSkill(specialSkillType));
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerAddSpecialSkill(lua_State* L)
{
	// player:addSpecialSkill(specialSkillType, value)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	SpecialSkills_t specialSkillType = getNumber<SpecialSkills_t>(L, 2);
	if (specialSkillType > SPECIALSKILL_LAST) {
		lua_pushnil(L);
		return 1;
	}

	player->setVarSpecialSkill(specialSkillType, getNumber<int32_t>(L, 3));
	// Refresh the client's skill window after the change.
	player->sendSkills();
	pushBoolean(L, true);
	return 1;
}
// Offline-training time and skill bindings.
int LuaScriptInterface::luaPlayerAddOfflineTrainingTime(lua_State* L)
{
	// player:addOfflineTrainingTime(time)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		int32_t time = getNumber<int32_t>(L, 2);
		player->addOfflineTrainingTime(time);
		player->sendStats();
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetOfflineTrainingTime(lua_State* L)
{
	// player:getOfflineTrainingTime()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getOfflineTrainingTime());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerRemoveOfflineTrainingTime(lua_State* L)
{
	// player:removeOfflineTrainingTime(time)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		int32_t time = getNumber<int32_t>(L, 2);
		player->removeOfflineTrainingTime(time);
		player->sendStats();
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerAddOfflineTrainingTries(lua_State* L)
{
	// player:addOfflineTrainingTries(skillType, tries)
	// Pushes the boolean result of the attempt (false when the skill
	// cannot be trained offline).
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		skills_t skillType = getNumber<skills_t>(L, 2);
		uint64_t tries = getNumber<uint64_t>(L, 3);
		pushBoolean(L, player->addOfflineTrainingTries(skillType, tries));
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetOfflineTrainingSkill(lua_State* L)
{
	// player:getOfflineTrainingSkill()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getOfflineTrainingSkill());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerSetOfflineTrainingSkill(lua_State* L)
{
	// player:setOfflineTrainingSkill(skillId)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		uint32_t skillId = getNumber<uint32_t>(L, 2);
		player->setOfflineTrainingSkill(skillId);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Counts items of a given id (or name) carried by the player.
// subType = -1 means "any subtype".
int LuaScriptInterface::luaPlayerGetItemCount(lua_State* L)
{
	// player:getItemCount(itemId[, subType = -1])
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	// Accept either a numeric item id or an item name.
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			lua_pushnil(L);
			return 1;
		}
	}

	int32_t subType = getNumber<int32_t>(L, 3, -1);
	lua_pushnumber(L, player->getItemTypeCount(itemId, subType));
	return 1;
}
// Finds the first matching item carried by the player; deepSearch also
// looks inside containers. Pushes the Item userdata or nil.
int LuaScriptInterface::luaPlayerGetItemById(lua_State* L)
{
	// player:getItemById(itemId, deepSearch[, subType = -1])
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	// Accept either a numeric item id or an item name.
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			lua_pushnil(L);
			return 1;
		}
	}

	bool deepSearch = getBoolean(L, 3);
	int32_t subType = getNumber<int32_t>(L, 4, -1);
	Item* item = g_game.findItemOfType(player, itemId, deepSearch, subType);
	if (item) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Pushes the player's Vocation as userdata, or nil for invalid userdata.
int LuaScriptInterface::luaPlayerGetVocation(lua_State* L)
{
	// player:getVocation()
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Vocation>(L, player->getVocation());
	setMetatable(L, -1, "Vocation");
	return 1;
}
// Changes the player's vocation. Accepts a vocation id, name, or Vocation
// userdata. Pushes true on success, false when the vocation cannot be
// resolved, nil for an invalid player.
int LuaScriptInterface::luaPlayerSetVocation(lua_State* L)
{
	// player:setVocation(id or name or userdata)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	Vocation* vocation;
	if (isNumber(L, 2)) {
		vocation = g_vocations.getVocation(getNumber<uint16_t>(L, 2));
	} else if (isString(L, 2)) {
		vocation = g_vocations.getVocation(g_vocations.getVocationId(getString(L, 2)));
	} else if (isUserdata(L, 2)) {
		vocation = getUserdata<Vocation>(L, 2);
	} else {
		vocation = nullptr;
	}

	if (!vocation) {
		pushBoolean(L, false);
		return 1;
	}

	player->setVocation(vocation->getId());
	pushBoolean(L, true);
	return 1;
}
// Sex accessors.
int LuaScriptInterface::luaPlayerGetSex(lua_State* L)
{
	// player:getSex()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getSex());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerSetSex(lua_State* L)
{
	// player:setSex(newSex)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		PlayerSex_t newSex = getNumber<PlayerSex_t>(L, 2);
		player->setSex(newSex);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Home-town accessors.
int LuaScriptInterface::luaPlayerGetTown(lua_State* L)
{
	// player:getTown()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		pushUserdata<Town>(L, player->getTown());
		setMetatable(L, -1, "Town");
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerSetTown(lua_State* L)
{
	// player:setTown(town)
	// Validate the town argument first: an invalid town pushes false
	// even before the player userdata is checked.
	Town* town = getUserdata<Town>(L, 2);
	if (!town) {
		pushBoolean(L, false);
		return 1;
	}

	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		player->setTown(town);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Guild accessors.
int LuaScriptInterface::luaPlayerGetGuild(lua_State* L)
{
	// player:getGuild()
	// Pushes the Guild userdata, or nil when the player has no guild.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	Guild* guild = player->getGuild();
	if (!guild) {
		lua_pushnil(L);
		return 1;
	}

	pushUserdata<Guild>(L, guild);
	setMetatable(L, -1, "Guild");
	return 1;
}

int LuaScriptInterface::luaPlayerSetGuild(lua_State* L)
{
	// player:setGuild(guild)
	// Accepts nil/invalid userdata to clear the player's guild.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	player->setGuild(getUserdata<Guild>(L, 2));
	pushBoolean(L, true);
	return 1;
}
// Guild-rank accessors; both require an existing guild membership.
int LuaScriptInterface::luaPlayerGetGuildLevel(lua_State* L)
{
	// player:getGuildLevel()
	Player* player = getUserdata<Player>(L, 1);
	if (player && player->getGuild()) {
		lua_pushnumber(L, player->getGuildRank()->level);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerSetGuildLevel(lua_State* L)
{
	// player:setGuildLevel(level)
	// Pushes false when no rank with that level exists in the guild.
	uint8_t level = getNumber<uint8_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (!player || !player->getGuild()) {
		lua_pushnil(L);
		return 1;
	}

	GuildRank_ptr rank = player->getGuild()->getRankByLevel(level);
	if (!rank) {
		pushBoolean(L, false);
	} else {
		player->setGuildRank(rank);
		pushBoolean(L, true);
	}
	return 1;
}
// Guild nick and group accessors.
int LuaScriptInterface::luaPlayerGetGuildNick(lua_State* L)
{
	// player:getGuildNick()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		pushString(L, player->getGuildNick());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerSetGuildNick(lua_State* L)
{
	// player:setGuildNick(nick)
	const std::string& nick = getString(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		player->setGuildNick(nick);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetGroup(lua_State* L)
{
	// player:getGroup()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		pushUserdata<Group>(L, player->getGroup());
		setMetatable(L, -1, "Group");
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaPlayerSetGroup(lua_State* L)
{
	// player:setGroup(group)
	// Validate the group argument first: an invalid group pushes false
	// even before the player userdata is checked.
	Group* group = getUserdata<Group>(L, 2);
	if (!group) {
		pushBoolean(L, false);
		return 1;
	}

	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		player->setGroup(group);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetStamina(lua_State* L)
{
	// player:getStamina()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getStaminaMinutes());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerSetStamina(lua_State* L)
{
	// player:setStamina(stamina)
	uint16_t stamina = getNumber<uint16_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		// 2520 minutes (42 hours) is the stamina cap.
		player->staminaMinutes = std::min<uint16_t>(2520, stamina);
		player->sendStats();
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Soul and bank-balance accessors.
int LuaScriptInterface::luaPlayerGetSoul(lua_State* L)
{
	// player:getSoul()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getSoul());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerAddSoul(lua_State* L)
{
	// player:addSoul(soulChange)
	// soulChange may be negative to remove soul points.
	int32_t soulChange = getNumber<int32_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		player->changeSoul(soulChange);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetMaxSoul(lua_State* L)
{
	// player:getMaxSoul()
	// The soul cap comes from the vocation, so it must be set.
	Player* player = getUserdata<Player>(L, 1);
	if (player && player->vocation) {
		lua_pushnumber(L, player->vocation->getSoulMax());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetBankBalance(lua_State* L)
{
	// player:getBankBalance()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getBankBalance());
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Sets the player's bank balance. Reads the value as a signed 64-bit
// integer so a negative amount can be detected and rejected with a
// script error instead of silently wrapping to a huge balance.
int LuaScriptInterface::luaPlayerSetBankBalance(lua_State* L)
{
	// player:setBankBalance(bankBalance)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	int64_t balance = getNumber<int64_t>(L, 2);
	if (balance < 0) {
		reportErrorFunc(L, "Invalid bank balance value.");
		lua_pushnil(L);
		return 1;
	}

	player->setBankBalance(balance);
	pushBoolean(L, true);
	return 1;
}
// Storage-key accessors (per-player persistent key/value integers).
int LuaScriptInterface::luaPlayerGetStorageValue(lua_State* L)
{
	// player:getStorageValue(key)
	// Pushes the stored value, or -1 when the key has never been set
	// (-1 is the conventional "unset" sentinel in scripts).
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	uint32_t key = getNumber<uint32_t>(L, 2);
	int32_t value;
	if (player->getStorageValue(key, value)) {
		lua_pushnumber(L, value);
	} else {
		lua_pushnumber(L, -1);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerSetStorageValue(lua_State* L)
{
	// player:setStorageValue(key, value)
	int32_t value = getNumber<int32_t>(L, 3);
	uint32_t key = getNumber<uint32_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	// Keys in the reserved range are managed by the engine itself and
	// must not be written from scripts.
	if (IS_IN_KEYRANGE(key, RESERVED_RANGE)) {
		reportErrorFunc(L, fmt::format("Accessing reserved range: {:d}", key));
		pushBoolean(L, false);
		return 1;
	}

	if (player) {
		player->addStorageValue(key, value);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Creates and gives items to the player. For stackable items a large
// count is split into multiple stacks of at most 100; when more than one
// item results, a Lua table of the created items is returned instead of
// a single Item userdata. The argument-count heuristic below decides
// whether the third argument means "count" or "subType".
int LuaScriptInterface::luaPlayerAddItem(lua_State* L)
{
	// player:addItem(itemId[, count = 1[, canDropOnMap = true[, subType = 1[, slot = CONST_SLOT_WHEREEVER]]]])
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		pushBoolean(L, false);
		return 1;
	}

	// Accept either a numeric item id or an item name.
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			lua_pushnil(L);
			return 1;
		}
	}

	int32_t count = getNumber<int32_t>(L, 3, 1);
	int32_t subType = getNumber<int32_t>(L, 5, 1);
	const ItemType& it = Item::items[itemId];

	int32_t itemCount = 1;
	int parameters = lua_gettop(L);
	if (parameters >= 4) {
		// Explicit canDropOnMap given: argument 3 is a plain count.
		itemCount = std::max<int32_t>(1, count);
	} else if (it.hasSubType()) {
		// Short form: argument 3 is the subType (charges/fluid/stack size).
		if (it.stackable) {
			// One item per full stack of 100.
			itemCount = std::ceil(count / 100.f);
		}
		subType = count;
	} else {
		itemCount = std::max<int32_t>(1, count);
	}

	// More than one created item -> results are collected into a table.
	bool hasTable = itemCount > 1;
	if (hasTable) {
		lua_newtable(L);
	} else if (itemCount == 0) {
		lua_pushnil(L);
		return 1;
	}

	bool canDropOnMap = getBoolean(L, 4, true);
	slots_t slot = getNumber<slots_t>(L, 6, CONST_SLOT_WHEREEVER);
	for (int32_t i = 1; i <= itemCount; ++i) {
		// For stackables, peel off up to 100 units per created item.
		int32_t stackCount = subType;
		if (it.stackable) {
			stackCount = std::min<int32_t>(stackCount, 100);
			subType -= stackCount;
		}

		Item* item = Item::CreateItem(itemId, stackCount);
		if (!item) {
			if (!hasTable) {
				lua_pushnil(L);
			}
			return 1;
		}

		ReturnValue ret = g_game.internalPlayerAddItem(player, item, canDropOnMap, slot);
		if (ret != RETURNVALUE_NOERROR) {
			// The item was never placed anywhere, so we own it: free it.
			delete item;
			if (!hasTable) {
				lua_pushnil(L);
			}
			return 1;
		}

		if (hasTable) {
			lua_pushnumber(L, i);
			pushUserdata<Item>(L, item);
			setItemMetatable(L, -1, item);
			lua_settable(L, -3);
		} else {
			pushUserdata<Item>(L, item);
			setItemMetatable(L, -1, item);
		}
	}
	return 1;
}
// Gives an existing (script-created) Item to the player. The item must
// still be parented to the virtual cylinder, i.e. not yet placed in the
// world. Pushes the ReturnValue of the add operation.
int LuaScriptInterface::luaPlayerAddItemEx(lua_State* L)
{
	// player:addItemEx(item[, canDropOnMap = false[, index = INDEX_WHEREEVER[, flags = 0]]])
	// player:addItemEx(item[, canDropOnMap = true[, slot = CONST_SLOT_WHEREEVER]])
	Item* item = getUserdata<Item>(L, 2);
	if (!item) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}

	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	// An item already attached to a tile/container/player cannot be
	// added a second time.
	if (item->getParent() != VirtualCylinder::virtualCylinder) {
		reportErrorFunc(L, "Item already has a parent");
		pushBoolean(L, false);
		return 1;
	}

	// canDropOnMap selects which overload was intended (slot-based vs
	// index/flags-based), so argument 4 means different things below.
	bool canDropOnMap = getBoolean(L, 3, false);
	ReturnValue returnValue;
	if (canDropOnMap) {
		slots_t slot = getNumber<slots_t>(L, 4, CONST_SLOT_WHEREEVER);
		returnValue = g_game.internalPlayerAddItem(player, item, true, slot);
	} else {
		int32_t index = getNumber<int32_t>(L, 4, INDEX_WHEREEVER);
		uint32_t flags = getNumber<uint32_t>(L, 5, 0);
		returnValue = g_game.internalAddItem(player, item, index, flags);
	}

	if (returnValue == RETURNVALUE_NOERROR) {
		// Ownership transferred to the game; stop tracking it as a
		// temporary script item.
		ScriptEnvironment::removeTempItem(item);
	}
	lua_pushnumber(L, returnValue);
	return 1;
}
// Removes `count` items of the given id (or name) from the player.
// Pushes the boolean result of the removal.
int LuaScriptInterface::luaPlayerRemoveItem(lua_State* L)
{
	// player:removeItem(itemId, count[, subType = -1[, ignoreEquipped = false]])
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	// Accept either a numeric item id or an item name.
	uint16_t itemId;
	if (isNumber(L, 2)) {
		itemId = getNumber<uint16_t>(L, 2);
	} else {
		itemId = Item::items.getItemIdByName(getString(L, 2));
		if (itemId == 0) {
			lua_pushnil(L);
			return 1;
		}
	}

	uint32_t count = getNumber<uint32_t>(L, 3);
	int32_t subType = getNumber<int32_t>(L, 4, -1);
	bool ignoreEquipped = getBoolean(L, 5, false);
	pushBoolean(L, player->removeItemOfType(itemId, count, subType, ignoreEquipped))
;	return 1;
}
// Money accessors (inventory money, not bank balance).
int LuaScriptInterface::luaPlayerGetMoney(lua_State* L)
{
	// player:getMoney()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		lua_pushnumber(L, player->getMoney());
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerAddMoney(lua_State* L)
{
	// player:addMoney(money)
	uint64_t money = getNumber<uint64_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		g_game.addMoney(player, money);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerRemoveMoney(lua_State* L)
{
	// player:removeMoney(money)
	// Pushes false when the player does not carry enough money.
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		uint64_t money = getNumber<uint64_t>(L, 2);
		pushBoolean(L, g_game.removeMoney(player, money));
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Opens a text window on the client for the given item (created from an
// id/name, or an existing Item userdata). The item becomes the player's
// current "write item" so the engine can route the edited text back.
int LuaScriptInterface::luaPlayerShowTextDialog(lua_State* L)
{
	// player:showTextDialog(id or name or userdata[, text[, canWrite[, length]]])
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	int32_t length = getNumber<int32_t>(L, 5, -1);
	bool canWrite = getBoolean(L, 4, false);
	std::string text;

	int parameters = lua_gettop(L);
	if (parameters >= 3) {
		text = getString(L, 3);
	}

	Item* item;
	if (isNumber(L, 2)) {
		item = Item::CreateItem(getNumber<uint16_t>(L, 2));
	} else if (isString(L, 2)) {
		item = Item::CreateItem(Item::items.getItemIdByName(getString(L, 2)));
	} else if (isUserdata(L, 2)) {
		if (getUserdataType(L, 2) != LuaData_Item) {
			pushBoolean(L, false);
			return 1;
		}

		item = getUserdata<Item>(L, 2);
	} else {
		item = nullptr;
	}

	if (!item) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}

	// length = -1 means "use the item type's maximum text length".
	if (length < 0) {
		length = Item::items[item->getID()].maxTextLen;
	}

	if (!text.empty()) {
		item->setText(text);
		// Never truncate pre-filled text below its own size.
		length = std::max<int32_t>(text.size(), length);
	}

	item->setParent(player);
	player->setWriteItem(item, length);
	player->sendTextWindow(item, length, canWrite);
	pushBoolean(L, true);
	return 1;
}
// Sends a text message to the player's client. With exactly 4 arguments
// the last is a channel id (the player must be in the channel); with more
// arguments the extras carry a position plus animated value/color pairs.
int LuaScriptInterface::luaPlayerSendTextMessage(lua_State* L)
{
	// player:sendTextMessage(type, text[, position, primaryValue = 0, primaryColor = TEXTCOLOR_NONE[, secondaryValue = 0, secondaryColor = TEXTCOLOR_NONE]])
	// player:sendTextMessage(type, text, channelId)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	int parameters = lua_gettop(L);
	TextMessage message(getNumber<MessageClasses>(L, 2), getString(L, 3));
	if (parameters == 4) {
		uint16_t channelId = getNumber<uint16_t>(L, 4);
		ChatChannel* channel = g_chat->getChannel(*player, channelId);
		// Only deliver to channels the player actually has open.
		if (!channel || !channel->hasUser(*player)) {
			pushBoolean(L, false);
			return 1;
		}
		message.channelId = channelId;
	} else {
		if (parameters >= 6) {
			message.position = getPosition(L, 4);
			message.primary.value = getNumber<int32_t>(L, 5);
			message.primary.color = getNumber<TextColor_t>(L, 6);
		}

		if (parameters >= 8) {
			message.secondary.value = getNumber<int32_t>(L, 7);
			message.secondary.color = getNumber<TextColor_t>(L, 8);
		}
	}

	player->sendTextMessage(message);
	pushBoolean(L, true);
	return 1;
}
// Channel / private-message send bindings.
int LuaScriptInterface::luaPlayerSendChannelMessage(lua_State* L)
{
	// player:sendChannelMessage(author, text, type, channelId)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	uint16_t channelId = getNumber<uint16_t>(L, 5);
	SpeakClasses type = getNumber<SpeakClasses>(L, 4);
	const std::string& text = getString(L, 3);
	const std::string& author = getString(L, 2);
	player->sendChannelMessage(author, text, type, channelId);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaPlayerSendPrivateMessage(lua_State* L)
{
	// player:sendPrivateMessage(speaker, text[, type])
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	const Player* speaker = getUserdata<const Player>(L, 2);
	const std::string& text = getString(L, 3);
	SpeakClasses type = getNumber<SpeakClasses>(L, 4, TALKTYPE_PRIVATE_FROM);
	player->sendPrivateMessage(speaker, type, text);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaPlayerChannelSay(lua_State* L)
{
	// player:channelSay(speaker, type, text, channelId)
	// Delivers a channel line to this player as if `speaker` said it.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	Creature* speaker = getCreature(L, 2);
	SpeakClasses type = getNumber<SpeakClasses>(L, 3);
	const std::string& text = getString(L, 4);
	uint16_t channelId = getNumber<uint16_t>(L, 5);
	player->sendToChannel(speaker, type, text, channelId);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaPlayerOpenChannel(lua_State* L)
{
	// player:openChannel(channelId)
	uint16_t channelId = getNumber<uint16_t>(L, 2);
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		g_game.playerOpenChannel(player->getID(), channelId);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Inventory-slot and party accessors.
int LuaScriptInterface::luaPlayerGetSlotItem(lua_State* L)
{
	// player:getSlotItem(slot)
	// Pushes the Item in the given equipment slot, or nil when the slot
	// is empty or holds no item.
	const Player* player = getUserdata<const Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	uint32_t slot = getNumber<uint32_t>(L, 2);
	Thing* thing = player->getThing(slot);
	if (!thing) {
		lua_pushnil(L);
		return 1;
	}

	Item* item = thing->getItem();
	if (item) {
		pushUserdata<Item>(L, item);
		setItemMetatable(L, -1, item);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerGetParty(lua_State* L)
{
	// player:getParty()
	const Player* player = getUserdata<const Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	Party* party = player->getParty();
	if (party) {
		pushUserdata<Party>(L, party);
		setMetatable(L, -1, "Party");
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Outfit grant/revoke bindings.
int LuaScriptInterface::luaPlayerAddOutfit(lua_State* L)
{
	// player:addOutfit(lookType)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		// addon 0: base outfit only.
		player->addOutfit(getNumber<uint16_t>(L, 2), 0);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerAddOutfitAddon(lua_State* L)
{
	// player:addOutfitAddon(lookType, addon)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		uint16_t lookType = getNumber<uint16_t>(L, 2);
		uint8_t addon = getNumber<uint8_t>(L, 3);
		player->addOutfit(lookType, addon);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerRemoveOutfit(lua_State* L)
{
	// player:removeOutfit(lookType)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		uint16_t lookType = getNumber<uint16_t>(L, 2);
		pushBoolean(L, player->removeOutfit(lookType));
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerRemoveOutfitAddon(lua_State* L)
{
	// player:removeOutfitAddon(lookType, addon)
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		uint16_t lookType = getNumber<uint16_t>(L, 2);
		uint8_t addon = getNumber<uint8_t>(L, 3);
		pushBoolean(L, player->removeOutfitAddon(lookType, addon));
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// Outfit query bindings.
int LuaScriptInterface::luaPlayerHasOutfit(lua_State* L)
{
	// player:hasOutfit(lookType[, addon = 0])
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		uint16_t lookType = getNumber<uint16_t>(L, 2);
		uint8_t addon = getNumber<uint8_t>(L, 3, 0);
		pushBoolean(L, player->hasOutfit(lookType, addon));
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerCanWearOutfit(lua_State* L)
{
	// player:canWearOutfit(lookType[, addon = 0])
	// Unlike hasOutfit, this also accounts for group/premium rules.
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		uint16_t lookType = getNumber<uint16_t>(L, 2);
		uint8_t addon = getNumber<uint8_t>(L, 3, 0);
		pushBoolean(L, player->canWear(lookType, addon));
	} else {
		lua_pushnil(L);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerSendOutfitWindow(lua_State* L)
{
	// player:sendOutfitWindow()
	Player* player = getUserdata<Player>(L, 1);
	if (player) {
		player->sendOutfitWindow();
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaPlayerAddMount(lua_State* L) {
	// player:addMount(mountId or mountName)
	// Accepts either a numeric mount id or a mount name; nil when the
	// player or the named mount does not exist.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	uint8_t mountId;
	if (!isNumber(L, 2)) {
		Mount* mount = g_game.mounts.getMountByName(getString(L, 2));
		if (!mount) {
			lua_pushnil(L);
			return 1;
		}
		mountId = mount->id;
	} else {
		mountId = getNumber<uint8_t>(L, 2);
	}
	pushBoolean(L, player->tameMount(mountId));
	return 1;
}

int LuaScriptInterface::luaPlayerRemoveMount(lua_State* L) {
	// player:removeMount(mountId or mountName)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	uint8_t mountId;
	if (!isNumber(L, 2)) {
		Mount* mount = g_game.mounts.getMountByName(getString(L, 2));
		if (!mount) {
			lua_pushnil(L);
			return 1;
		}
		mountId = mount->id;
	} else {
		mountId = getNumber<uint8_t>(L, 2);
	}
	pushBoolean(L, player->untameMount(mountId));
	return 1;
}

int LuaScriptInterface::luaPlayerHasMount(lua_State* L) {
	// player:hasMount(mountId or mountName)
	const Player* player = getUserdata<const Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	// Resolve the mount by id or by name, depending on the argument type.
	Mount* mount = isNumber(L, 2)
		? g_game.mounts.getMountByID(getNumber<uint8_t>(L, 2))
		: g_game.mounts.getMountByName(getString(L, 2));
	if (!mount) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, player->hasMount(mount));
	return 1;
}
int LuaScriptInterface::luaPlayerGetPremiumEndsAt(lua_State* L)
{
	// player:getPremiumEndsAt()
	// Pushes the premium-expiry timestamp.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, player->premiumEndsAt);
	return 1;
}

int LuaScriptInterface::luaPlayerSetPremiumEndsAt(lua_State* L)
{
	// player:setPremiumEndsAt(timestamp)
	// Updates both the in-memory player and the account row in the database.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	time_t endsAt = getNumber<time_t>(L, 2);
	player->setPremiumTime(endsAt);
	IOLoginData::updatePremiumTime(player->getAccount(), endsAt);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaPlayerHasBlessing(lua_State* L)
{
	// player:hasBlessing(blessing)
	// Blessings are 1-based on the Lua side, 0-based internally.
	uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, player->hasBlessing(blessing));
	return 1;
}

int LuaScriptInterface::luaPlayerAddBlessing(lua_State* L)
{
	// player:addBlessing(blessing)
	// Returns false if the blessing is already present.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
	if (player->hasBlessing(blessing)) {
		pushBoolean(L, false);
	} else {
		player->addBlessing(blessing);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerRemoveBlessing(lua_State* L)
{
	// player:removeBlessing(blessing)
	// Returns false if the blessing was not present.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
	if (!player->hasBlessing(blessing)) {
		pushBoolean(L, false);
	} else {
		player->removeBlessing(blessing);
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaPlayerCanLearnSpell(lua_State* L)
{
	// player:canLearnSpell(spellName)
	// True when the player's vocation, level and magic level satisfy the
	// spell's requirements (always true with PlayerFlag_IgnoreSpellCheck).
	const Player* player = getUserdata<const Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	const std::string& spellName = getString(L, 2);
	InstantSpell* spell = g_spells->getInstantSpellByName(spellName);
	if (!spell) {
		reportErrorFunc(L, "Spell \"" + spellName + "\" not found");
		pushBoolean(L, false);
		return 1;
	}
	if (player->hasFlag(PlayerFlag_IgnoreSpellCheck)) {
		pushBoolean(L, true);
		return 1;
	}
	// Same checks as the original cascaded else-ifs, short-circuited.
	const auto& vocMap = spell->getVocMap();
	bool learnable = vocMap.count(player->getVocationId()) != 0
		&& player->getLevel() >= spell->getLevel()
		&& player->getMagicLevel() >= spell->getMagicLevel();
	pushBoolean(L, learnable);
	return 1;
}

int LuaScriptInterface::luaPlayerLearnSpell(lua_State* L)
{
	// player:learnSpell(spellName)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	const std::string& spellName = getString(L, 2);
	player->learnInstantSpell(spellName);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaPlayerForgetSpell(lua_State* L)
{
	// player:forgetSpell(spellName)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	const std::string& spellName = getString(L, 2);
	player->forgetInstantSpell(spellName);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaPlayerHasLearnedSpell(lua_State* L)
{
	// player:hasLearnedSpell(spellName)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	const std::string& spellName = getString(L, 2);
	pushBoolean(L, player->hasLearnedInstantSpell(spellName));
	return 1;
}
int LuaScriptInterface::luaPlayerSendTutorial(lua_State* L)
{
	// player:sendTutorial(tutorialId)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	uint8_t tutorialId = getNumber<uint8_t>(L, 2);
	player->sendTutorial(tutorialId);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaPlayerAddMapMark(lua_State* L)
{
	// player:addMapMark(position, type, description)
	// Sends a minimap marker to the client.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	const Position& position = getPosition(L, 2);
	uint8_t type = getNumber<uint8_t>(L, 3);
	const std::string& description = getString(L, 4);
	player->sendAddMarker(position, type, description);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaPlayerSave(lua_State* L)
{
	// player:save()
	// Persists the player; the login position is refreshed to the current
	// position first, matching a normal logout.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	player->loginPosition = player->getPosition();
	pushBoolean(L, IOLoginData::savePlayer(player));
	return 1;
}

int LuaScriptInterface::luaPlayerPopupFYI(lua_State* L)
{
	// player:popupFYI(message)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	const std::string& message = getString(L, 2);
	player->sendFYIBox(message);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaPlayerIsPzLocked(lua_State* L)
{
	// player:isPzLocked()
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, player->isPzLocked());
	return 1;
}

int LuaScriptInterface::luaPlayerGetClient(lua_State* L)
{
	// player:getClient()
	// Returns a table { version = ..., os = ... } describing the client.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	lua_createtable(L, 0, 2);
	setField(L, "version", player->getProtocolVersion());
	setField(L, "os", player->getOperatingSystem());
	return 1;
}
int LuaScriptInterface::luaPlayerGetHouse(lua_State* L)
{
	// player:getHouse()
	// Returns the house owned by this player (looked up by GUID), or nil.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	House* house = g_game.map.houses.getHouseByPlayerId(player->getGUID());
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<House>(L, house);
	setMetatable(L, -1, "House");
	return 1;
}

int LuaScriptInterface::luaPlayerSendHouseWindow(lua_State* L)
{
	// player:sendHouseWindow(house, listId)
	// Opens the house access-list edit window on the client.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	House* house = getUserdata<House>(L, 2);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	uint32_t listId = getNumber<uint32_t>(L, 3);
	player->sendHouseWindow(house, listId);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaPlayerSetEditHouse(lua_State* L)
{
	// player:setEditHouse(house, listId)
	// Marks which house/list the player is currently editing.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	House* house = getUserdata<House>(L, 2);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	uint32_t listId = getNumber<uint32_t>(L, 3);
	player->setEditHouse(house, listId);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaPlayerSetGhostMode(lua_State* L)
{
	// player:setGhostMode(enabled[, showEffect=true])
	// Toggles the player's ghost mode and synchronizes every affected
	// client plus the VIP/online status. Returns true, or nil on bad
	// userdata.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	bool enabled = getBoolean(L, 2);
	// Already in the requested state: nothing to do.
	if (player->isInGhostMode() == enabled) {
		pushBoolean(L, true);
		return 1;
	}
	bool showEffect = getBoolean(L, 3, true);
	player->switchGhostMode();
	Tile* tile = player->getTile();
	const Position& position = player->getPosition();
	const bool isInvisible = player->isInvisible();
	SpectatorVec spectators;
	// NOTE(review): tmpPlayer below is dereferenced without a null check,
	// so this presumably relies on getSpectators(..., true, true) returning
	// players only — confirm against Map::getSpectators' parameters.
	g_game.map.getSpectators(spectators, position, true, true);
	for (Creature* spectator : spectators) {
		Player* tmpPlayer = spectator->getPlayer();
		if (tmpPlayer != player && !tmpPlayer->isAccessPlayer()) {
			// Regular players: remove or re-add the creature on their client.
			if (enabled) {
				tmpPlayer->sendRemoveTileCreature(player, position, tile->getClientIndexOfCreature(tmpPlayer, player));
			} else {
				tmpPlayer->sendCreatureAppear(player, position, showEffect);
			}
		} else {
			// The player himself and access players keep seeing the creature;
			// just refresh its visibility marker (skipped while invisible).
			if (isInvisible) {
				continue;
			}
			tmpPlayer->sendCreatureChangeVisible(player, !enabled);
		}
	}
	// Mirror the new ghost state in non-staff VIP lists and in the
	// database online flag.
	if (player->isInGhostMode()) {
		for (const auto& it : g_game.getPlayers()) {
			if (!it.second->isAccessPlayer()) {
				it.second->notifyStatusChange(player, VIPSTATUS_OFFLINE);
			}
		}
		IOLoginData::updateOnlineStatus(player->getGUID(), false);
	} else {
		for (const auto& it : g_game.getPlayers()) {
			if (!it.second->isAccessPlayer()) {
				it.second->notifyStatusChange(player, VIPSTATUS_ONLINE);
			}
		}
		IOLoginData::updateOnlineStatus(player->getGUID(), true);
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaPlayerGetContainerId(lua_State* L)
{
	// player:getContainerId(container)
	// Maps an open Container userdata back to its client container id.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	Container* container = getUserdata<Container>(L, 2);
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, player->getContainerID(container));
	return 1;
}

int LuaScriptInterface::luaPlayerGetContainerById(lua_State* L)
{
	// player:getContainerById(id)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	Container* container = player->getContainerByID(getNumber<uint8_t>(L, 2));
	if (!container) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Container>(L, container);
	setMetatable(L, -1, "Container");
	return 1;
}

int LuaScriptInterface::luaPlayerGetContainerIndex(lua_State* L)
{
	// player:getContainerIndex(id)
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, player->getContainerIndex(getNumber<uint8_t>(L, 2)));
	return 1;
}
int LuaScriptInterface::luaPlayerGetInstantSpells(lua_State* L)
{
	// player:getInstantSpells()
	// Builds an array of every instant spell the player can currently cast.
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}

	std::vector<const InstantSpell*> castable;
	for (auto& entry : g_spells->getInstantSpells()) {
		if (entry.second.canCast(player)) {
			castable.push_back(&entry.second);
		}
	}

	lua_createtable(L, castable.size(), 0);
	int i = 0;
	for (const InstantSpell* spell : castable) {
		pushInstantSpell(L, *spell);
		lua_rawseti(L, -2, ++i);
	}
	return 1;
}

int LuaScriptInterface::luaPlayerCanCast(lua_State* L)
{
	// player:canCast(spell)
	Player* player = getUserdata<Player>(L, 1);
	InstantSpell* spell = getUserdata<InstantSpell>(L, 2);
	if (!player || !spell) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, spell->canCast(player));
	return 1;
}

int LuaScriptInterface::luaPlayerHasChaseMode(lua_State* L)
{
	// player:hasChaseMode()
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, player->chaseMode);
	return 1;
}

int LuaScriptInterface::luaPlayerHasSecureMode(lua_State* L)
{
	// player:hasSecureMode()
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, player->secureMode);
	return 1;
}

int LuaScriptInterface::luaPlayerGetFightMode(lua_State* L)
{
	// player:getFightMode()
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, player->fightMode);
	return 1;
}

int LuaScriptInterface::luaPlayerGetStoreInbox(lua_State* L)
{
	// player:getStoreInbox()
	Player* player = getUserdata<Player>(L, 1);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	Container* storeInbox = player->getStoreInbox();
	if (!storeInbox) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Container>(L, storeInbox);
	setMetatable(L, -1, "Container");
	return 1;
}
// Monster
int LuaScriptInterface::luaMonsterCreate(lua_State* L)
{
	// Monster(id or userdata)
	// Wraps an existing monster by creature id or re-wraps Monster userdata.
	Monster* monster = nullptr;
	if (isNumber(L, 2)) {
		monster = g_game.getMonsterByID(getNumber<uint32_t>(L, 2));
	} else if (isUserdata(L, 2)) {
		if (getUserdataType(L, 2) != LuaData_Monster) {
			lua_pushnil(L);
			return 1;
		}
		monster = getUserdata<Monster>(L, 2);
	}
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Monster>(L, monster);
	setMetatable(L, -1, "Monster");
	return 1;
}

int LuaScriptInterface::luaMonsterIsMonster(lua_State* L)
{
	// monster:isMonster()
	pushBoolean(L, getUserdata<const Monster>(L, 1) != nullptr);
	return 1;
}

int LuaScriptInterface::luaMonsterGetType(lua_State* L)
{
	// monster:getType()
	const Monster* monster = getUserdata<const Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<MonsterType>(L, monster->mType);
	setMetatable(L, -1, "MonsterType");
	return 1;
}

int LuaScriptInterface::luaMonsterRename(lua_State* L)
{
	// monster:rename(name[, nameDescription])
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	monster->setName(getString(L, 2));
	// The description is only changed when explicitly supplied.
	if (lua_gettop(L) >= 3) {
		monster->setNameDescription(getString(L, 3));
	}
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaMonsterGetSpawnPosition(lua_State* L)
{
	// monster:getSpawnPosition()
	const Monster* monster = getUserdata<const Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	pushPosition(L, monster->getMasterPos());
	return 1;
}

int LuaScriptInterface::luaMonsterIsInSpawnRange(lua_State* L)
{
	// monster:isInSpawnRange([position])
	// Defaults to the monster's own position when none is given.
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, monster->isInSpawnRange(lua_gettop(L) >= 2 ? getPosition(L, 2) : monster->getPosition()));
	return 1;
}

int LuaScriptInterface::luaMonsterIsIdle(lua_State* L)
{
	// monster:isIdle()
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, monster->getIdleStatus());
	return 1;
}

int LuaScriptInterface::luaMonsterSetIdle(lua_State* L)
{
	// monster:setIdle(idle)
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	monster->setIdle(getBoolean(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterIsTarget(lua_State* L)
{
	// monster:isTarget(creature)
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	const Creature* creature = getCreature(L, 2);
	if (!creature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, monster->isTarget(creature));
	return 1;
}

int LuaScriptInterface::luaMonsterIsOpponent(lua_State* L)
{
	// monster:isOpponent(creature)
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	const Creature* creature = getCreature(L, 2);
	if (!creature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, monster->isOpponent(creature));
	return 1;
}

int LuaScriptInterface::luaMonsterIsFriend(lua_State* L)
{
	// monster:isFriend(creature)
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	const Creature* creature = getCreature(L, 2);
	if (!creature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, monster->isFriend(creature));
	return 1;
}

int LuaScriptInterface::luaMonsterAddFriend(lua_State* L)
{
	// monster:addFriend(creature)
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	Creature* creature = getCreature(L, 2);
	if (!creature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	monster->addFriend(creature);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaMonsterRemoveFriend(lua_State* L)
{
	// monster:removeFriend(creature)
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	Creature* creature = getCreature(L, 2);
	if (!creature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	monster->removeFriend(creature);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterGetFriendList(lua_State* L)
{
	// monster:getFriendList()
	// Returns an array of Creature userdata for every friend.
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	const auto& friendList = monster->getFriendList();
	lua_createtable(L, friendList.size(), 0);
	int i = 0;
	for (Creature* friendCreature : friendList) {
		pushUserdata<Creature>(L, friendCreature);
		setCreatureMetatable(L, -1, friendCreature);
		lua_rawseti(L, -2, ++i);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterGetFriendCount(lua_State* L)
{
	// monster:getFriendCount()
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, monster->getFriendList().size());
	return 1;
}

int LuaScriptInterface::luaMonsterAddTarget(lua_State* L)
{
	// monster:addTarget(creature[, pushFront = false])
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	Creature* creature = getCreature(L, 2);
	if (!creature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	monster->addTarget(creature, getBoolean(L, 3, false));
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaMonsterRemoveTarget(lua_State* L)
{
	// monster:removeTarget(creature)
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	Creature* creature = getCreature(L, 2);
	if (!creature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	monster->removeTarget(creature);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaMonsterGetTargetList(lua_State* L)
{
	// monster:getTargetList()
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	const auto& targetList = monster->getTargetList();
	lua_createtable(L, targetList.size(), 0);
	int i = 0;
	for (Creature* target : targetList) {
		pushUserdata<Creature>(L, target);
		setCreatureMetatable(L, -1, target);
		lua_rawseti(L, -2, ++i);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterGetTargetCount(lua_State* L)
{
	// monster:getTargetCount()
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, monster->getTargetList().size());
	return 1;
}

int LuaScriptInterface::luaMonsterSelectTarget(lua_State* L)
{
	// monster:selectTarget(creature)
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	Creature* creature = getCreature(L, 2);
	if (!creature) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, monster->selectTarget(creature));
	return 1;
}

int LuaScriptInterface::luaMonsterSearchTarget(lua_State* L)
{
	// monster:searchTarget([searchType = TARGETSEARCH_DEFAULT])
	Monster* monster = getUserdata<Monster>(L, 1);
	if (!monster) {
		lua_pushnil(L);
		return 1;
	}
	TargetSearchType_t searchType = getNumber<TargetSearchType_t>(L, 2, TARGETSEARCH_DEFAULT);
	pushBoolean(L, monster->searchTarget(searchType));
	return 1;
}
// Npc
int LuaScriptInterface::luaNpcCreate(lua_State* L)
{
	// Npc([id or name or userdata])
	// Without an argument, wraps the NPC of the current script context.
	Npc* npc = nullptr;
	if (lua_gettop(L) < 2) {
		npc = getScriptEnv()->getNpc();
	} else if (isNumber(L, 2)) {
		npc = g_game.getNpcByID(getNumber<uint32_t>(L, 2));
	} else if (isString(L, 2)) {
		npc = g_game.getNpcByName(getString(L, 2));
	} else if (isUserdata(L, 2)) {
		if (getUserdataType(L, 2) != LuaData_Npc) {
			lua_pushnil(L);
			return 1;
		}
		npc = getUserdata<Npc>(L, 2);
	}
	if (!npc) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Npc>(L, npc);
	setMetatable(L, -1, "Npc");
	return 1;
}

int LuaScriptInterface::luaNpcIsNpc(lua_State* L)
{
	// npc:isNpc()
	pushBoolean(L, getUserdata<const Npc>(L, 1) != nullptr);
	return 1;
}

int LuaScriptInterface::luaNpcSetMasterPos(lua_State* L)
{
	// npc:setMasterPos(pos[, radius])
	Npc* npc = getUserdata<Npc>(L, 1);
	if (!npc) {
		lua_pushnil(L);
		return 1;
	}
	const Position& pos = getPosition(L, 2);
	int32_t radius = getNumber<int32_t>(L, 3, 1);
	npc->setMasterPos(pos, radius);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaNpcGetSpeechBubble(lua_State* L)
{
	// npc:getSpeechBubble()
	Npc* npc = getUserdata<Npc>(L, 1);
	if (!npc) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, npc->getSpeechBubble());
	return 1;
}

int LuaScriptInterface::luaNpcSetSpeechBubble(lua_State* L)
{
	// npc:setSpeechBubble(speechBubble)
	// Pushes nothing (returns 0 values to Lua), matching the original API.
	Npc* npc = getUserdata<Npc>(L, 1);
	if (!npc) {
		return 0;
	}
	npc->setSpeechBubble(getNumber<uint8_t>(L, 2));
	return 0;
}
// Guild
int LuaScriptInterface::luaGuildCreate(lua_State* L)
{
	// Guild(id)
	Guild* guild = g_game.getGuild(getNumber<uint32_t>(L, 2));
	if (!guild) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Guild>(L, guild);
	setMetatable(L, -1, "Guild");
	return 1;
}

int LuaScriptInterface::luaGuildGetId(lua_State* L)
{
	// guild:getId()
	Guild* guild = getUserdata<Guild>(L, 1);
	if (!guild) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, guild->getId());
	return 1;
}

int LuaScriptInterface::luaGuildGetName(lua_State* L)
{
	// guild:getName()
	Guild* guild = getUserdata<Guild>(L, 1);
	if (!guild) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, guild->getName());
	return 1;
}

int LuaScriptInterface::luaGuildGetMembersOnline(lua_State* L)
{
	// guild:getMembersOnline()
	// Returns an array of Player userdata for every online member.
	const Guild* guild = getUserdata<const Guild>(L, 1);
	if (!guild) {
		lua_pushnil(L);
		return 1;
	}
	const auto& members = guild->getMembersOnline();
	lua_createtable(L, members.size(), 0);
	int i = 0;
	for (Player* member : members) {
		pushUserdata<Player>(L, member);
		setMetatable(L, -1, "Player");
		lua_rawseti(L, -2, ++i);
	}
	return 1;
}

int LuaScriptInterface::luaGuildAddRank(lua_State* L)
{
	// guild:addRank(id, name, level)
	Guild* guild = getUserdata<Guild>(L, 1);
	if (!guild) {
		lua_pushnil(L);
		return 1;
	}
	uint32_t id = getNumber<uint32_t>(L, 2);
	const std::string& name = getString(L, 3);
	uint8_t level = getNumber<uint8_t>(L, 4);
	guild->addRank(id, name, level);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaGuildGetRankById(lua_State* L)
{
	// guild:getRankById(id)
	// Returns a table { id, name, level } or nil when the rank is unknown.
	Guild* guild = getUserdata<Guild>(L, 1);
	if (!guild) {
		lua_pushnil(L);
		return 1;
	}
	uint32_t id = getNumber<uint32_t>(L, 2);
	GuildRank_ptr rank = guild->getRankById(id);
	if (!rank) {
		lua_pushnil(L);
		return 1;
	}
	lua_createtable(L, 0, 3);
	setField(L, "id", rank->id);
	setField(L, "name", rank->name);
	setField(L, "level", rank->level);
	return 1;
}

int LuaScriptInterface::luaGuildGetRankByLevel(lua_State* L)
{
	// guild:getRankByLevel(level)
	// Returns a table { id, name, level } or nil when the rank is unknown.
	const Guild* guild = getUserdata<const Guild>(L, 1);
	if (!guild) {
		lua_pushnil(L);
		return 1;
	}
	uint8_t level = getNumber<uint8_t>(L, 2);
	GuildRank_ptr rank = guild->getRankByLevel(level);
	if (!rank) {
		lua_pushnil(L);
		return 1;
	}
	lua_createtable(L, 0, 3);
	setField(L, "id", rank->id);
	setField(L, "name", rank->name);
	setField(L, "level", rank->level);
	return 1;
}

int LuaScriptInterface::luaGuildGetMotd(lua_State* L)
{
	// guild:getMotd()
	Guild* guild = getUserdata<Guild>(L, 1);
	if (!guild) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, guild->getMotd());
	return 1;
}

int LuaScriptInterface::luaGuildSetMotd(lua_State* L)
{
	// guild:setMotd(motd)
	// The motd string is read before the userdata check, as in the original.
	const std::string& motd = getString(L, 2);
	Guild* guild = getUserdata<Guild>(L, 1);
	if (!guild) {
		lua_pushnil(L);
		return 1;
	}
	guild->setMotd(motd);
	pushBoolean(L, true);
	return 1;
}
// Group
int LuaScriptInterface::luaGroupCreate(lua_State* L)
{
	// Group(id)
	Group* group = g_game.groups.getGroup(getNumber<uint32_t>(L, 2));
	if (!group) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Group>(L, group);
	setMetatable(L, -1, "Group");
	return 1;
}

int LuaScriptInterface::luaGroupGetId(lua_State* L)
{
	// group:getId()
	Group* group = getUserdata<Group>(L, 1);
	if (!group) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, group->id);
	return 1;
}

int LuaScriptInterface::luaGroupGetName(lua_State* L)
{
	// group:getName()
	Group* group = getUserdata<Group>(L, 1);
	if (!group) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, group->name);
	return 1;
}

int LuaScriptInterface::luaGroupGetFlags(lua_State* L)
{
	// group:getFlags()
	Group* group = getUserdata<Group>(L, 1);
	if (!group) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, group->flags);
	return 1;
}

int LuaScriptInterface::luaGroupGetAccess(lua_State* L)
{
	// group:getAccess()
	Group* group = getUserdata<Group>(L, 1);
	if (!group) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, group->access);
	return 1;
}

int LuaScriptInterface::luaGroupGetMaxDepotItems(lua_State* L)
{
	// group:getMaxDepotItems()
	Group* group = getUserdata<Group>(L, 1);
	if (!group) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, group->maxDepotItems);
	return 1;
}

int LuaScriptInterface::luaGroupGetMaxVipEntries(lua_State* L)
{
	// group:getMaxVipEntries()
	Group* group = getUserdata<Group>(L, 1);
	if (!group) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, group->maxVipEntries);
	return 1;
}

int LuaScriptInterface::luaGroupHasFlag(lua_State* L)
{
	// group:hasFlag(flag)
	// Bitwise test of the flag against the group's flag mask.
	Group* group = getUserdata<Group>(L, 1);
	if (!group) {
		lua_pushnil(L);
		return 1;
	}
	PlayerFlags flag = getNumber<PlayerFlags>(L, 2);
	pushBoolean(L, (group->flags & flag) != 0);
	return 1;
}
// Vocation
int LuaScriptInterface::luaVocationCreate(lua_State* L)
{
	// Vocation(id or name)
	// Resolves a vocation either by numeric id or by (case-handled) name.
	uint32_t id;
	if (isNumber(L, 2)) {
		id = getNumber<uint32_t>(L, 2);
	} else {
		id = g_vocations.getVocationId(getString(L, 2));
	}
	Vocation* vocation = g_vocations.getVocation(id);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Vocation>(L, vocation);
	setMetatable(L, -1, "Vocation");
	return 1;
}

int LuaScriptInterface::luaVocationGetId(lua_State* L)
{
	// vocation:getId()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getId());
	return 1;
}

int LuaScriptInterface::luaVocationGetClientId(lua_State* L)
{
	// vocation:getClientId()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getClientId());
	return 1;
}

int LuaScriptInterface::luaVocationGetName(lua_State* L)
{
	// vocation:getName()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, vocation->getVocName());
	return 1;
}

int LuaScriptInterface::luaVocationGetDescription(lua_State* L)
{
	// vocation:getDescription()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, vocation->getVocDescription());
	return 1;
}

int LuaScriptInterface::luaVocationGetRequiredSkillTries(lua_State* L)
{
	// vocation:getRequiredSkillTries(skillType, skillLevel)
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	skills_t skillType = getNumber<skills_t>(L, 2);
	uint16_t skillLevel = getNumber<uint16_t>(L, 3);
	lua_pushnumber(L, vocation->getReqSkillTries(skillType, skillLevel));
	return 1;
}

int LuaScriptInterface::luaVocationGetRequiredManaSpent(lua_State* L)
{
	// vocation:getRequiredManaSpent(magicLevel)
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	uint32_t magicLevel = getNumber<uint32_t>(L, 2);
	lua_pushnumber(L, vocation->getReqMana(magicLevel));
	return 1;
}

int LuaScriptInterface::luaVocationGetCapacityGain(lua_State* L)
{
	// vocation:getCapacityGain()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getCapGain());
	return 1;
}

int LuaScriptInterface::luaVocationGetHealthGain(lua_State* L)
{
	// vocation:getHealthGain()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getHPGain());
	return 1;
}

int LuaScriptInterface::luaVocationGetHealthGainTicks(lua_State* L)
{
	// vocation:getHealthGainTicks()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getHealthGainTicks());
	return 1;
}

int LuaScriptInterface::luaVocationGetHealthGainAmount(lua_State* L)
{
	// vocation:getHealthGainAmount()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getHealthGainAmount());
	return 1;
}

int LuaScriptInterface::luaVocationGetManaGain(lua_State* L)
{
	// vocation:getManaGain()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getManaGain());
	return 1;
}

int LuaScriptInterface::luaVocationGetManaGainTicks(lua_State* L)
{
	// vocation:getManaGainTicks()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getManaGainTicks());
	return 1;
}

int LuaScriptInterface::luaVocationGetManaGainAmount(lua_State* L)
{
	// vocation:getManaGainAmount()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getManaGainAmount());
	return 1;
}

int LuaScriptInterface::luaVocationGetMaxSoul(lua_State* L)
{
	// vocation:getMaxSoul()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getSoulMax());
	return 1;
}

int LuaScriptInterface::luaVocationGetSoulGainTicks(lua_State* L)
{
	// vocation:getSoulGainTicks()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getSoulGainTicks());
	return 1;
}

int LuaScriptInterface::luaVocationGetAttackSpeed(lua_State* L)
{
	// vocation:getAttackSpeed()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getAttackSpeed());
	return 1;
}

int LuaScriptInterface::luaVocationGetBaseSpeed(lua_State* L)
{
	// vocation:getBaseSpeed()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, vocation->getBaseSpeed());
	return 1;
}

int LuaScriptInterface::luaVocationGetDemotion(lua_State* L)
{
	// vocation:getDemotion()
	// Returns the vocation this one was promoted from, or nil when there
	// is none (or the mapping points back to itself).
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	uint16_t fromId = vocation->getFromVocation();
	if (fromId == VOCATION_NONE) {
		lua_pushnil(L);
		return 1;
	}
	Vocation* demotedVocation = g_vocations.getVocation(fromId);
	if (!demotedVocation || demotedVocation == vocation) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Vocation>(L, demotedVocation);
	setMetatable(L, -1, "Vocation");
	return 1;
}

int LuaScriptInterface::luaVocationGetPromotion(lua_State* L)
{
	// vocation:getPromotion()
	// Returns the promoted vocation, or nil when there is none (or the
	// mapping points back to itself).
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	uint16_t promotedId = g_vocations.getPromotedVocation(vocation->getId());
	if (promotedId == VOCATION_NONE) {
		lua_pushnil(L);
		return 1;
	}
	Vocation* promotedVocation = g_vocations.getVocation(promotedId);
	if (!promotedVocation || promotedVocation == vocation) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Vocation>(L, promotedVocation);
	setMetatable(L, -1, "Vocation");
	return 1;
}

int LuaScriptInterface::luaVocationAllowsPvp(lua_State* L)
{
	// vocation:allowsPvp()
	Vocation* vocation = getUserdata<Vocation>(L, 1);
	if (!vocation) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, vocation->allowsPvp());
	return 1;
}
// Town
int LuaScriptInterface::luaTownCreate(lua_State* L)
{
	// Town(id or name)
	// Accepts either a numeric town id or a town name; pushes nil when not found.
	Town* town = nullptr;
	if (isNumber(L, 2)) {
		town = g_game.map.towns.getTown(getNumber<uint32_t>(L, 2));
	} else if (isString(L, 2)) {
		town = g_game.map.towns.getTown(getString(L, 2));
	}

	if (!town) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Town>(L, town);
	setMetatable(L, -1, "Town");
	return 1;
}

int LuaScriptInterface::luaTownGetId(lua_State* L)
{
	// town:getId()
	Town* town = getUserdata<Town>(L, 1);
	if (!town) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, town->getID());
	return 1;
}

int LuaScriptInterface::luaTownGetName(lua_State* L)
{
	// town:getName()
	Town* town = getUserdata<Town>(L, 1);
	if (!town) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, town->getName());
	return 1;
}

int LuaScriptInterface::luaTownGetTemplePosition(lua_State* L)
{
	// town:getTemplePosition()
	Town* town = getUserdata<Town>(L, 1);
	if (!town) {
		lua_pushnil(L);
		return 1;
	}
	pushPosition(L, town->getTemplePosition());
	return 1;
}
// House
int LuaScriptInterface::luaHouseCreate(lua_State* L)
{
	// House(id)
	House* house = g_game.map.houses.getHouse(getNumber<uint32_t>(L, 2));
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<House>(L, house);
	setMetatable(L, -1, "House");
	return 1;
}

int LuaScriptInterface::luaHouseGetId(lua_State* L)
{
	// house:getId()
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, house->getId());
	return 1;
}

int LuaScriptInterface::luaHouseGetName(lua_State* L)
{
	// house:getName()
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, house->getName());
	return 1;
}

int LuaScriptInterface::luaHouseGetTown(lua_State* L)
{
	// house:getTown()
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}

	Town* town = g_game.map.towns.getTown(house->getTownId());
	if (!town) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Town>(L, town);
	setMetatable(L, -1, "Town");
	return 1;
}

int LuaScriptInterface::luaHouseGetExitPosition(lua_State* L)
{
	// house:getExitPosition()
	// NOTE(review): scripts see the house "exit" as the entry tile position.
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	pushPosition(L, house->getEntryPosition());
	return 1;
}

int LuaScriptInterface::luaHouseGetRent(lua_State* L)
{
	// house:getRent()
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, house->getRent());
	return 1;
}

int LuaScriptInterface::luaHouseGetOwnerGuid(lua_State* L)
{
	// house:getOwnerGuid()
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, house->getOwner());
	return 1;
}

int LuaScriptInterface::luaHouseSetOwnerGuid(lua_State* L)
{
	// house:setOwnerGuid(guid[, updateDatabase = true])
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	house->setOwner(getNumber<uint32_t>(L, 2), getBoolean(L, 3, true));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaHouseStartTrade(lua_State* L)
{
	// house:startTrade(player, tradePartner)
	// Starts a house-transfer trade between the current owner and a partner.
	// Pushes nil if any argument is invalid userdata; otherwise pushes a
	// RETURNVALUE_* code (RETURNVALUE_NOERROR on success). The precondition
	// checks below run in order, so the first failing one determines the code.
	House* house = getUserdata<House>(L, 1);
	Player* player = getUserdata<Player>(L, 2);
	Player* tradePartner = getUserdata<Player>(L, 3);
	if (!player || !tradePartner || !house) {
		lua_pushnil(L);
		return 1;
	}
	// Both players must be within trade range of each other.
	if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
		lua_pushnumber(L, RETURNVALUE_TRADEPLAYERFARAWAY);
		return 1;
	}
	// Only the current owner may initiate the transfer.
	if (house->getOwner() != player->getGUID()) {
		lua_pushnumber(L, RETURNVALUE_YOUDONTOWNTHISHOUSE);
		return 1;
	}
	// The partner may not already own a house...
	if (g_game.map.houses.getHouseByPlayerId(tradePartner->getGUID())) {
		lua_pushnumber(L, RETURNVALUE_TRADEPLAYERALREADYOWNSAHOUSE);
		return 1;
	}
	// ...nor be the highest bidder on an auctioned house.
	if (IOLoginData::hasBiddedOnHouse(tradePartner->getGUID())) {
		lua_pushnumber(L, RETURNVALUE_TRADEPLAYERHIGHESTBIDDER);
		return 1;
	}
	// The house must provide a transfer item to trade with.
	Item* transferItem = house->getTransferItem();
	if (!transferItem) {
		lua_pushnumber(L, RETURNVALUE_YOUCANNOTTRADETHISHOUSE);
		return 1;
	}
	// Re-parent the transfer item to the owner so it can enter the trade window;
	// if the trade fails to start, put the transfer item back.
	transferItem->getParent()->setParent(player);
	if (!g_game.internalStartTrade(player, tradePartner, transferItem)) {
		house->resetTransferItem();
	}
	lua_pushnumber(L, RETURNVALUE_NOERROR);
	return 1;
}
int LuaScriptInterface::luaHouseGetBeds(lua_State* L)
{
	// house:getBeds()
	// Pushes an array table of the house's bed items.
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}

	const auto& beds = house->getBeds();
	lua_createtable(L, beds.size(), 0);

	int i = 0;
	for (BedItem* bed : beds) {
		pushUserdata<Item>(L, bed);
		setItemMetatable(L, -1, bed);
		lua_rawseti(L, -2, ++i);
	}
	return 1;
}

int LuaScriptInterface::luaHouseGetBedCount(lua_State* L)
{
	// house:getBedCount()
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, house->getBedCount());
	return 1;
}

int LuaScriptInterface::luaHouseGetDoors(lua_State* L)
{
	// house:getDoors()
	// Pushes an array table of the house's door items.
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}

	const auto& doors = house->getDoors();
	lua_createtable(L, doors.size(), 0);

	int i = 0;
	for (Door* door : doors) {
		pushUserdata<Item>(L, door);
		setItemMetatable(L, -1, door);
		lua_rawseti(L, -2, ++i);
	}
	return 1;
}

int LuaScriptInterface::luaHouseGetDoorCount(lua_State* L)
{
	// house:getDoorCount()
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, house->getDoors().size());
	return 1;
}

int LuaScriptInterface::luaHouseGetDoorIdByPosition(lua_State* L)
{
	// house:getDoorIdByPosition(position)
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}

	Door* door = house->getDoorByPosition(getPosition(L, 2));
	if (!door) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, door->getDoorId());
	return 1;
}

int LuaScriptInterface::luaHouseGetTiles(lua_State* L)
{
	// house:getTiles()
	// Pushes an array table of every tile belonging to the house.
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}

	const auto& tiles = house->getTiles();
	lua_createtable(L, tiles.size(), 0);

	int i = 0;
	for (Tile* tile : tiles) {
		pushUserdata<Tile>(L, tile);
		setMetatable(L, -1, "Tile");
		lua_rawseti(L, -2, ++i);
	}
	return 1;
}

int LuaScriptInterface::luaHouseGetItems(lua_State* L)
{
	// house:getItems()
	// Pushes an array table of every item lying on the house's tiles.
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}

	lua_newtable(L);

	int i = 0;
	for (Tile* tile : house->getTiles()) {
		TileItemVector* items = tile->getItemList();
		if (!items) {
			continue;
		}
		for (Item* item : *items) {
			pushUserdata<Item>(L, item);
			setItemMetatable(L, -1, item);
			lua_rawseti(L, -2, ++i);
		}
	}
	return 1;
}

int LuaScriptInterface::luaHouseGetTileCount(lua_State* L)
{
	// house:getTileCount()
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, house->getTiles().size());
	return 1;
}

int LuaScriptInterface::luaHouseCanEditAccessList(lua_State* L)
{
	// house:canEditAccessList(listId, player)
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, house->canEditAccessList(getNumber<uint32_t>(L, 2), getPlayer(L, 3)));
	return 1;
}

int LuaScriptInterface::luaHouseGetAccessList(lua_State* L)
{
	// house:getAccessList(listId)
	// Pushes the access list text, or false when the list id is unknown.
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}

	std::string text;
	if (!house->getAccessList(getNumber<uint32_t>(L, 2), text)) {
		pushBoolean(L, false);
		return 1;
	}
	pushString(L, text);
	return 1;
}

int LuaScriptInterface::luaHouseSetAccessList(lua_State* L)
{
	// house:setAccessList(listId, list)
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	house->setAccessList(getNumber<uint32_t>(L, 2), getString(L, 3));
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaHouseKickPlayer(lua_State* L)
{
	// house:kickPlayer(player, targetPlayer)
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, house->kickPlayer(getPlayer(L, 2), getPlayer(L, 3)));
	return 1;
}

int LuaScriptInterface::luaHouseSave(lua_State* L)
{
	// house:save()
	House* house = getUserdata<House>(L, 1);
	if (!house) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, IOMapSerialize::saveHouse(house));
	return 1;
}
// ItemType
int LuaScriptInterface::luaItemTypeCreate(lua_State* L)
{
	// ItemType(id or name)
	// Accepts a numeric item id or an item name; unknown names resolve to id 0.
	uint32_t id;
	if (isNumber(L, 2)) {
		id = getNumber<uint32_t>(L, 2);
	} else if (isString(L, 2)) {
		id = Item::items.getItemIdByName(getString(L, 2));
	} else {
		lua_pushnil(L);
		return 1;
	}

	pushUserdata<const ItemType>(L, &Item::items[id]);
	setMetatable(L, -1, "ItemType");
	return 1;
}

int LuaScriptInterface::luaItemTypeIsCorpse(lua_State* L)
{
	// itemType:isCorpse()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->corpseType != RACE_NONE);
	return 1;
}

int LuaScriptInterface::luaItemTypeIsDoor(lua_State* L)
{
	// itemType:isDoor()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->isDoor());
	return 1;
}

int LuaScriptInterface::luaItemTypeIsContainer(lua_State* L)
{
	// itemType:isContainer()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->isContainer());
	return 1;
}

int LuaScriptInterface::luaItemTypeIsFluidContainer(lua_State* L)
{
	// itemType:isFluidContainer()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->isFluidContainer());
	return 1;
}

int LuaScriptInterface::luaItemTypeIsMovable(lua_State* L)
{
	// itemType:isMovable()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->moveable);
	return 1;
}

int LuaScriptInterface::luaItemTypeIsRune(lua_State* L)
{
	// itemType:isRune()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->isRune());
	return 1;
}

int LuaScriptInterface::luaItemTypeIsStackable(lua_State* L)
{
	// itemType:isStackable()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->stackable);
	return 1;
}

int LuaScriptInterface::luaItemTypeIsReadable(lua_State* L)
{
	// itemType:isReadable()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->canReadText);
	return 1;
}

int LuaScriptInterface::luaItemTypeIsWritable(lua_State* L)
{
	// itemType:isWritable()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->canWriteText);
	return 1;
}

int LuaScriptInterface::luaItemTypeIsBlocking(lua_State* L)
{
	// itemType:isBlocking()
	// True when the item blocks either projectiles or movement.
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->blockProjectile || it->blockSolid);
	return 1;
}

int LuaScriptInterface::luaItemTypeIsGroundTile(lua_State* L)
{
	// itemType:isGroundTile()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->isGroundTile());
	return 1;
}

int LuaScriptInterface::luaItemTypeIsMagicField(lua_State* L)
{
	// itemType:isMagicField()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->isMagicField());
	return 1;
}

int LuaScriptInterface::luaItemTypeIsUseable(lua_State* L)
{
	// itemType:isUseable()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->isUseable());
	return 1;
}

int LuaScriptInterface::luaItemTypeIsPickupable(lua_State* L)
{
	// itemType:isPickupable()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->isPickupable());
	return 1;
}
int LuaScriptInterface::luaItemTypeGetType(lua_State* L)
{
	// itemType:getType()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->type);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetGroup(lua_State* L)
{
	// itemType:getGroup()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->group);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetId(lua_State* L)
{
	// itemType:getId()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->id);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetClientId(lua_State* L)
{
	// itemType:getClientId()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->clientId);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetName(lua_State* L)
{
	// itemType:getName()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, it->name);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetPluralName(lua_State* L)
{
	// itemType:getPluralName()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, it->getPluralName());
	return 1;
}

int LuaScriptInterface::luaItemTypeGetArticle(lua_State* L)
{
	// itemType:getArticle()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, it->article);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetDescription(lua_State* L)
{
	// itemType:getDescription()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, it->description);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetSlotPosition(lua_State* L)
{
	// itemType:getSlotPosition()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->slotPosition);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetCharges(lua_State* L)
{
	// itemType:getCharges()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->charges);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetFluidSource(lua_State* L)
{
	// itemType:getFluidSource()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->fluidSource);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetCapacity(lua_State* L)
{
	// itemType:getCapacity()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->maxItems);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetWeight(lua_State* L)
{
	// itemType:getWeight([count = 1])
	// Returns the total weight for `count` items; counts below 1 weigh as 1.
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}

	uint16_t count = getNumber<uint16_t>(L, 2, 1);
	uint64_t weight = static_cast<uint64_t>(it->weight) * std::max<int32_t>(1, count);
	lua_pushnumber(L, weight);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetHitChance(lua_State* L)
{
	// itemType:getHitChance()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->hitChance);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetShootRange(lua_State* L)
{
	// itemType:getShootRange()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->shootRange);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetAttack(lua_State* L)
{
	// itemType:getAttack()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->attack);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetAttackSpeed(lua_State* L)
{
	// itemType:getAttackSpeed()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->attackSpeed);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetDefense(lua_State* L)
{
	// itemType:getDefense()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->defense);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetExtraDefense(lua_State* L)
{
	// itemType:getExtraDefense()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->extraDefense);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetArmor(lua_State* L)
{
	// itemType:getArmor()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->armor);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetWeaponType(lua_State* L)
{
	// itemType:getWeaponType()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->weaponType);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetAmmoType(lua_State* L)
{
	// itemType:getAmmoType()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->ammoType);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetCorpseType(lua_State* L)
{
	// itemType:getCorpseType()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->corpseType);
	return 1;
}
int LuaScriptInterface::luaItemTypeGetAbilities(lua_State* L)
{
	// itemType:getAbilities()
	// Pushes a table describing the item's passive abilities (regeneration,
	// stat/skill modifiers, absorb percentages, ...), or nil when the userdata
	// is invalid.
	ItemType* itemType = getUserdata<ItemType>(L, 1);
	if (!itemType) {
		// Bug fix: the original pushed nothing here while still returning 1,
		// leaving the Lua stack unbalanced. Push nil like every other binding.
		lua_pushnil(L);
		return 1;
	}

	Abilities& abilities = itemType->getAbilities();
	lua_createtable(L, 6, 12);
	setField(L, "healthGain", abilities.healthGain);
	setField(L, "healthTicks", abilities.healthTicks);
	setField(L, "manaGain", abilities.manaGain);
	setField(L, "manaTicks", abilities.manaTicks);
	setField(L, "conditionImmunities", abilities.conditionImmunities);
	setField(L, "conditionSuppressions", abilities.conditionSuppressions);
	setField(L, "speed", abilities.speed);
	setField(L, "elementDamage", abilities.elementDamage);
	setField(L, "elementType", abilities.elementType);

	// Boolean flags are pushed directly (setField handles numeric values).
	lua_pushboolean(L, abilities.manaShield);
	lua_setfield(L, -2, "manaShield");
	lua_pushboolean(L, abilities.invisible);
	lua_setfield(L, -2, "invisible");
	lua_pushboolean(L, abilities.regeneration);
	lua_setfield(L, -2, "regeneration");

	// Stats (1-based Lua arrays, hence the i + 1 index).
	lua_createtable(L, 0, STAT_LAST + 1);
	for (int32_t i = STAT_FIRST; i <= STAT_LAST; i++) {
		lua_pushnumber(L, abilities.stats[i]);
		lua_rawseti(L, -2, i + 1);
	}
	lua_setfield(L, -2, "stats");

	// Stats percent
	lua_createtable(L, 0, STAT_LAST + 1);
	for (int32_t i = STAT_FIRST; i <= STAT_LAST; i++) {
		lua_pushnumber(L, abilities.statsPercent[i]);
		lua_rawseti(L, -2, i + 1);
	}
	lua_setfield(L, -2, "statsPercent");

	// Skills
	lua_createtable(L, 0, SKILL_LAST + 1);
	for (int32_t i = SKILL_FIRST; i <= SKILL_LAST; i++) {
		lua_pushnumber(L, abilities.skills[i]);
		lua_rawseti(L, -2, i + 1);
	}
	lua_setfield(L, -2, "skills");

	// Special skills
	lua_createtable(L, 0, SPECIALSKILL_LAST + 1);
	for (int32_t i = SPECIALSKILL_FIRST; i <= SPECIALSKILL_LAST; i++) {
		lua_pushnumber(L, abilities.specialSkills[i]);
		lua_rawseti(L, -2, i + 1);
	}
	lua_setfield(L, -2, "specialSkills");

	// Field absorb percent
	lua_createtable(L, 0, COMBAT_COUNT);
	for (int32_t i = 0; i < COMBAT_COUNT; i++) {
		lua_pushnumber(L, abilities.fieldAbsorbPercent[i]);
		lua_rawseti(L, -2, i + 1);
	}
	lua_setfield(L, -2, "fieldAbsorbPercent");

	// Absorb percent
	lua_createtable(L, 0, COMBAT_COUNT);
	for (int32_t i = 0; i < COMBAT_COUNT; i++) {
		lua_pushnumber(L, abilities.absorbPercent[i]);
		lua_rawseti(L, -2, i + 1);
	}
	lua_setfield(L, -2, "absorbPercent");
	return 1;
}
int LuaScriptInterface::luaItemTypeHasShowAttributes(lua_State* L)
{
	// itemType:hasShowAttributes()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->showAttributes);
	return 1;
}

int LuaScriptInterface::luaItemTypeHasShowCount(lua_State* L)
{
	// itemType:hasShowCount()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->showCount);
	return 1;
}

int LuaScriptInterface::luaItemTypeHasShowCharges(lua_State* L)
{
	// itemType:hasShowCharges()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->showCharges);
	return 1;
}

int LuaScriptInterface::luaItemTypeHasShowDuration(lua_State* L)
{
	// itemType:hasShowDuration()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->showDuration);
	return 1;
}

int LuaScriptInterface::luaItemTypeHasAllowDistRead(lua_State* L)
{
	// itemType:hasAllowDistRead()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->allowDistRead);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetWieldInfo(lua_State* L)
{
	// itemType:getWieldInfo()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushinteger(L, it->wieldInfo);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetDuration(lua_State* L)
{
	// itemType:getDuration()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushinteger(L, it->decayTime);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetLevelDoor(lua_State* L)
{
	// itemType:getLevelDoor()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushinteger(L, it->levelDoor);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetVocationString(lua_State* L)
{
	// itemType:getVocationString()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushString(L, it->vocationString);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetMinReqLevel(lua_State* L)
{
	// itemType:getMinReqLevel()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushinteger(L, it->minReqLevel);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetMinReqMagicLevel(lua_State* L)
{
	// itemType:getMinReqMagicLevel()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushinteger(L, it->minReqMagicLevel);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetElementType(lua_State* L)
{
	// itemType:getElementType()
	// Pushes nil when the item has no abilities block at all.
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it || !it->abilities) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->abilities->elementType);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetElementDamage(lua_State* L)
{
	// itemType:getElementDamage()
	// Pushes nil when the item has no abilities block at all.
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it || !it->abilities) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->abilities->elementDamage);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetTransformEquipId(lua_State* L)
{
	// itemType:getTransformEquipId()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->transformEquipTo);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetTransformDeEquipId(lua_State* L)
{
	// itemType:getTransformDeEquipId()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->transformDeEquipTo);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetDestroyId(lua_State* L)
{
	// itemType:getDestroyId()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->destroyTo);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetDecayId(lua_State* L)
{
	// itemType:getDecayId()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->decayTo);
	return 1;
}

int LuaScriptInterface::luaItemTypeGetRequiredLevel(lua_State* L)
{
	// itemType:getRequiredLevel()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, it->minReqLevel);
	return 1;
}

int LuaScriptInterface::luaItemTypeHasSubType(lua_State* L)
{
	// itemType:hasSubType()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->hasSubType());
	return 1;
}

int LuaScriptInterface::luaItemTypeIsStoreItem(lua_State* L)
{
	// itemType:isStoreItem()
	const ItemType* it = getUserdata<const ItemType>(L, 1);
	if (!it) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, it->storeItem);
	return 1;
}
// Combat
int LuaScriptInterface::luaCombatCreate(lua_State* L)
{
	// Combat()
	// Combat objects live as shared_ptr userdata owned by the Lua environment.
	auto combat = g_luaEnvironment.createCombatObject(getScriptEnv()->getScriptInterface());
	pushSharedPtr(L, combat);
	setMetatable(L, -1, "Combat");
	return 1;
}

int LuaScriptInterface::luaCombatDelete(lua_State* L)
{
	// Drops this userdata's reference to the combat object.
	Combat_ptr& ptr = getSharedPtr<Combat>(L, 1);
	if (ptr) {
		ptr.reset();
	}
	return 0;
}

int LuaScriptInterface::luaCombatSetParameter(lua_State* L)
{
	// combat:setParameter(key, value)
	// Booleans are coerced to 0/1 before being stored.
	const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
	if (!combat) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}

	CombatParam_t key = getNumber<CombatParam_t>(L, 2);
	uint32_t value = isBoolean(L, 3) ? (getBoolean(L, 3) ? 1 : 0) : getNumber<uint32_t>(L, 3);
	combat->setParam(key, value);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCombatGetParameter(lua_State* L)
{
	// combat:getParameter(key)
	// Bug fix: Combat objects are stored as shared_ptr userdata (see
	// luaCombatCreate/pushSharedPtr), so they must be fetched with
	// getSharedPtr like every other combat:* binding — getUserdata would
	// misread the stored shared_ptr. Also report the standard error, for
	// consistency with the sibling combat functions.
	const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
	if (!combat) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}

	int32_t value = combat->getParam(getNumber<CombatParam_t>(L, 2));
	// getParam signals "unset/unknown key" with INT32_MAX.
	if (value == std::numeric_limits<int32_t>::max()) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, value);
	return 1;
}
int LuaScriptInterface::luaCombatSetFormula(lua_State* L)
{
	// combat:setFormula(type, mina, minb, maxa, maxb)
	const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
	if (!combat) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}

	formulaType_t formula = getNumber<formulaType_t>(L, 2);
	double minA = getNumber<double>(L, 3);
	double minB = getNumber<double>(L, 4);
	double maxA = getNumber<double>(L, 5);
	double maxB = getNumber<double>(L, 6);
	combat->setPlayerCombatValues(formula, minA, minB, maxA, maxB);
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaCombatSetArea(lua_State* L)
{
	// combat:setArea(area)
	// Areas may only be attached while scripts are being loaded.
	if (getScriptEnv()->getScriptId() != EVENT_ID_LOADING) {
		reportErrorFunc(L, "This function can only be used while loading the script.");
		lua_pushnil(L);
		return 1;
	}

	const AreaCombat* area = g_luaEnvironment.getAreaObject(getNumber<uint32_t>(L, 2));
	if (!area) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}

	const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
	if (!combat) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}

	// The combat owns its own copy of the area.
	combat->setArea(new AreaCombat(*area));
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaCombatAddCondition(lua_State* L)
{
	// combat:addCondition(condition)
	const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
	if (!combat) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}

	Condition* condition = getUserdata<Condition>(L, 2);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}
	// Store a clone so the Lua-owned condition can be freed independently.
	combat->addCondition(condition->clone());
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaCombatClearConditions(lua_State* L)
{
	// combat:clearConditions()
	const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
	if (!combat) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}
	combat->clearConditions();
	pushBoolean(L, true);
	return 1;
}

int LuaScriptInterface::luaCombatSetCallback(lua_State* L)
{
	// combat:setCallback(key, function)
	const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
	if (!combat) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}

	CallBackParam_t key = getNumber<CallBackParam_t>(L, 2);
	if (!combat->setCallback(key)) {
		lua_pushnil(L);
		return 1;
	}

	CallBack* callback = combat->getCallback(key);
	if (!callback) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, callback->loadCallBack(getScriptEnv()->getScriptInterface(), getString(L, 3)));
	return 1;
}

int LuaScriptInterface::luaCombatSetOrigin(lua_State* L)
{
	// combat:setOrigin(origin)
	const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
	if (!combat) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}
	combat->setOrigin(getNumber<CombatOrigin>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCombatExecute(lua_State* L)
{
	// combat:execute(creature, variant)
	// Runs the combat using `creature` as the attacker (may be any creature
	// userdata, or a non-userdata value resolving to nil) against a target
	// described by the variant. Pushes true on success, false when the target
	// cannot be resolved, or nil when the combat userdata is invalid.
	const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
	if (!combat) {
		reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
		lua_pushnil(L);
		return 1;
	}
	// When a userdata attacker is given, it must be a creature-like type.
	if (isUserdata(L, 2)) {
		LuaDataType type = getUserdataType(L, 2);
		if (type != LuaData_Player && type != LuaData_Monster && type != LuaData_Npc) {
			pushBoolean(L, false);
			return 1;
		}
	}
	Creature* creature = getCreature(L, 2);
	const LuaVariant& variant = getVariant(L, 3);
	// Dispatch on the variant kind: creature id, fixed position,
	// target position, or player name.
	switch (variant.type) {
		case VARIANT_NUMBER: {
			Creature* target = g_game.getCreatureByID(variant.number);
			if (!target) {
				pushBoolean(L, false);
				return 1;
			}
			// Area combats hit around the target's position; otherwise hit
			// the target directly.
			if (combat->hasArea()) {
				combat->doCombat(creature, target->getPosition());
			} else {
				combat->doCombat(creature, target);
			}
			break;
		}
		case VARIANT_POSITION: {
			combat->doCombat(creature, variant.pos);
			break;
		}
		case VARIANT_TARGETPOSITION: {
			if (combat->hasArea()) {
				combat->doCombat(creature, variant.pos);
			} else {
				// No area: nothing to hit at the position, so only show the
				// post-combat effects and a "poff" at the target location.
				combat->postCombatEffects(creature, variant.pos);
				g_game.addMagicEffect(variant.pos, CONST_ME_POFF);
			}
			break;
		}
		case VARIANT_STRING: {
			Player* target = g_game.getPlayerByName(variant.text);
			if (!target) {
				pushBoolean(L, false);
				return 1;
			}
			combat->doCombat(creature, target);
			break;
		}
		case VARIANT_NONE: {
			reportErrorFunc(L, getErrorDesc(LUA_ERROR_VARIANT_NOT_FOUND));
			pushBoolean(L, false);
			return 1;
		}
		default: {
			break;
		}
	}
	pushBoolean(L, true);
	return 1;
}
// Condition
int LuaScriptInterface::luaConditionCreate(lua_State* L)
{
// Condition(conditionType[, conditionId = CONDITIONID_COMBAT])
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
Condition* condition = Condition::createCondition(conditionId, conditionType, 0, 0);
if (condition) {
pushUserdata<Condition>(L, condition);
setMetatable(L, -1, "Condition");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionDelete(lua_State* L)
{
// condition:delete()
Condition** conditionPtr = getRawUserdata<Condition>(L, 1);
if (conditionPtr && *conditionPtr) {
delete *conditionPtr;
*conditionPtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaConditionGetId(lua_State* L)
{
	// condition:getId()
	Condition* condition = getUserdata<Condition>(L, 1);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, condition->getId());
	return 1;
}

int LuaScriptInterface::luaConditionGetSubId(lua_State* L)
{
	// condition:getSubId()
	Condition* condition = getUserdata<Condition>(L, 1);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, condition->getSubId());
	return 1;
}

int LuaScriptInterface::luaConditionGetType(lua_State* L)
{
	// condition:getType()
	Condition* condition = getUserdata<Condition>(L, 1);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, condition->getType());
	return 1;
}

int LuaScriptInterface::luaConditionGetIcons(lua_State* L)
{
	// condition:getIcons()
	Condition* condition = getUserdata<Condition>(L, 1);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, condition->getIcons());
	return 1;
}

int LuaScriptInterface::luaConditionGetEndTime(lua_State* L)
{
	// condition:getEndTime()
	Condition* condition = getUserdata<Condition>(L, 1);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, condition->getEndTime());
	return 1;
}
int LuaScriptInterface::luaConditionClone(lua_State* L)
{
	// condition:clone()
	// Pushes a copy of the condition as a fresh userdata, or nil.
	Condition* condition = getUserdata<Condition>(L, 1);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Condition>(L, condition->clone());
	setMetatable(L, -1, "Condition");
	return 1;
}
int LuaScriptInterface::luaConditionGetTicks(lua_State* L)
{
	// condition:getTicks()
	Condition* condition = getUserdata<Condition>(L, 1);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, condition->getTicks());
	return 1;
}

int LuaScriptInterface::luaConditionSetTicks(lua_State* L)
{
	// condition:setTicks(ticks)
	Condition* condition = getUserdata<Condition>(L, 1);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}
	condition->setTicks(getNumber<int32_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaConditionSetParameter(lua_State* L)
{
	// condition:setParameter(key, value)
	// Boolean values are accepted and coerced to 1/0 before being stored.
	Condition* condition = getUserdata<Condition>(L, 1);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}

	ConditionParam_t key = getNumber<ConditionParam_t>(L, 2);
	int32_t value = isBoolean(L, 3) ? (getBoolean(L, 3) ? 1 : 0) : getNumber<int32_t>(L, 3);
	condition->setParam(key, value);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaConditionGetParameter(lua_State* L)
{
	// condition:getParameter(key)
	// Pushes the stored parameter value; nil when the userdata is invalid or
	// Condition::getParam signals an unknown key with its INT32_MAX sentinel.
	Condition* condition = getUserdata<Condition>(L, 1);
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}

	int32_t value = condition->getParam(getNumber<ConditionParam_t>(L, 2));
	// Idiom fix: call the static max() through the type instead of
	// default-constructing a throwaway std::numeric_limits object.
	if (value == std::numeric_limits<int32_t>::max()) {
		lua_pushnil(L);
		return 1;
	}

	lua_pushnumber(L, value);
	return 1;
}
int LuaScriptInterface::luaConditionSetFormula(lua_State* L)
{
	// condition:setFormula(mina, minb, maxa, maxb)
	// Only meaningful for speed conditions; any other condition type yields nil.
	ConditionSpeed* condition = dynamic_cast<ConditionSpeed*>(getUserdata<Condition>(L, 1));
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}

	double mina = getNumber<double>(L, 2);
	double minb = getNumber<double>(L, 3);
	double maxa = getNumber<double>(L, 4);
	double maxb = getNumber<double>(L, 5);
	condition->setFormulaVars(mina, minb, maxa, maxb);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaConditionSetOutfit(lua_State* L)
{
	// condition:setOutfit(outfit)
	// condition:setOutfit(lookTypeEx, lookType, lookHead, lookBody, lookLegs, lookFeet[, lookAddons[, lookMount]])
	// Applies an outfit to an outfit condition. Accepts either a single outfit
	// table or the individual look fields as separate arguments.
	Outfit_t outfit;
	if (isTable(L, 2)) {
		outfit = getOutfit(L, 2);
	} else {
		// Fields are fetched in reverse argument order; the two optional
		// trailing arguments fall back to the Outfit_t defaults that the
		// freshly constructed `outfit` already carries.
		outfit.lookMount = getNumber<uint16_t>(L, 9, outfit.lookMount);
		outfit.lookAddons = getNumber<uint8_t>(L, 8, outfit.lookAddons);
		outfit.lookFeet = getNumber<uint8_t>(L, 7);
		outfit.lookLegs = getNumber<uint8_t>(L, 6);
		outfit.lookBody = getNumber<uint8_t>(L, 5);
		outfit.lookHead = getNumber<uint8_t>(L, 4);
		outfit.lookType = getNumber<uint16_t>(L, 3);
		outfit.lookTypeEx = getNumber<uint16_t>(L, 2);
	}
	// Only outfit conditions can carry an outfit; anything else yields nil.
	ConditionOutfit* condition = dynamic_cast<ConditionOutfit*>(getUserdata<Condition>(L, 1));
	if (condition) {
		condition->setOutfit(outfit);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaConditionAddDamage(lua_State* L)
{
	// condition:addDamage(rounds, time, value)
	// Only valid for damage conditions; any other condition type yields nil.
	ConditionDamage* condition = dynamic_cast<ConditionDamage*>(getUserdata<Condition>(L, 1));
	if (!condition) {
		lua_pushnil(L);
		return 1;
	}

	int32_t rounds = getNumber<int32_t>(L, 2);
	int32_t time = getNumber<int32_t>(L, 3);
	int32_t value = getNumber<int32_t>(L, 4);
	pushBoolean(L, condition->addDamage(rounds, time, value));
	return 1;
}
// Outfit
int LuaScriptInterface::luaOutfitCreate(lua_State* L)
{
	// Outfit(looktype)
	// Looks the outfit up by its look type; nil when it is not registered.
	const Outfit* outfit = Outfits::getInstance().getOutfitByLookType(getNumber<uint16_t>(L, 2));
	if (!outfit) {
		lua_pushnil(L);
		return 1;
	}
	pushOutfit(L, outfit);
	return 1;
}

int LuaScriptInterface::luaOutfitCompare(lua_State* L)
{
	// outfit == outfitEx
	Outfit lhs = getOutfitClass(L, 1);
	Outfit rhs = getOutfitClass(L, 2);
	pushBoolean(L, lhs == rhs);
	return 1;
}
// MonsterType
int LuaScriptInterface::luaMonsterTypeCreate(lua_State* L)
{
	// MonsterType(name)
	// Resolves a registered monster type by name; nil when unknown.
	MonsterType* monsterType = g_monsters.getMonsterType(getString(L, 2));
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<MonsterType>(L, monsterType);
	setMetatable(L, -1, "MonsterType");
	return 1;
}
// The accessors below all follow one pattern: with a single argument they act
// as a getter and push the flag; with a second boolean argument they act as a
// setter, store it, and push true. An invalid userdata always yields nil.

int LuaScriptInterface::luaMonsterTypeIsAttackable(lua_State* L)
{
	// get: monsterType:isAttackable() set: monsterType:isAttackable(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.isAttackable);
	} else {
		monsterType->info.isAttackable = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeIsChallengeable(lua_State* L)
{
	// get: monsterType:isChallengeable() set: monsterType:isChallengeable(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.isChallengeable);
	} else {
		monsterType->info.isChallengeable = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeIsConvinceable(lua_State* L)
{
	// get: monsterType:isConvinceable() set: monsterType:isConvinceable(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.isConvinceable);
	} else {
		monsterType->info.isConvinceable = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeIsSummonable(lua_State* L)
{
	// get: monsterType:isSummonable() set: monsterType:isSummonable(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.isSummonable);
	} else {
		monsterType->info.isSummonable = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeIsIgnoringSpawnBlock(lua_State* L)
{
	// get: monsterType:isIgnoringSpawnBlock() set: monsterType:isIgnoringSpawnBlock(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.isIgnoringSpawnBlock);
	} else {
		monsterType->info.isIgnoringSpawnBlock = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeIsIllusionable(lua_State* L)
{
	// get: monsterType:isIllusionable() set: monsterType:isIllusionable(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.isIllusionable);
	} else {
		monsterType->info.isIllusionable = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeIsHostile(lua_State* L)
{
	// get: monsterType:isHostile() set: monsterType:isHostile(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.isHostile);
	} else {
		monsterType->info.isHostile = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeIsPushable(lua_State* L)
{
	// get: monsterType:isPushable() set: monsterType:isPushable(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.pushable);
	} else {
		monsterType->info.pushable = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeIsHealthHidden(lua_State* L)
{
	// get: monsterType:isHealthHidden() set: monsterType:isHealthHidden(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.hiddenHealth);
	} else {
		monsterType->info.hiddenHealth = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeIsBoss(lua_State* L)
{
	// get: monsterType:isBoss() set: monsterType:isBoss(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.isBoss);
	} else {
		monsterType->info.isBoss = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeCanPushItems(lua_State* L)
{
	// get: monsterType:canPushItems() set: monsterType:canPushItems(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.canPushItems);
	} else {
		monsterType->info.canPushItems = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeCanPushCreatures(lua_State* L)
{
	// get: monsterType:canPushCreatures() set: monsterType:canPushCreatures(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.canPushCreatures);
	} else {
		monsterType->info.canPushCreatures = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeCanWalkOnEnergy(lua_State* L)
{
	// get: monsterType:canWalkOnEnergy() set: monsterType:canWalkOnEnergy(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.canWalkOnEnergy);
	} else {
		monsterType->info.canWalkOnEnergy = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeCanWalkOnFire(lua_State* L)
{
	// get: monsterType:canWalkOnFire() set: monsterType:canWalkOnFire(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.canWalkOnFire);
	} else {
		monsterType->info.canWalkOnFire = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeCanWalkOnPoison(lua_State* L)
{
	// get: monsterType:canWalkOnPoison() set: monsterType:canWalkOnPoison(bool)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, monsterType->info.canWalkOnPoison);
	} else {
		monsterType->info.canWalkOnPoison = getBoolean(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}
// NOTE(review): return type is int32_t here while sibling accessors use int;
// kept as-is to match the declaration elsewhere — confirm before changing.
int32_t LuaScriptInterface::luaMonsterTypeName(lua_State* L)
{
	// get: monsterType:name() set: monsterType:name(name)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushString(L, monsterType->name);
	} else {
		monsterType->name = getString(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeNameDescription(lua_State* L)
{
	// get: monsterType:nameDescription() set: monsterType:nameDescription(desc)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushString(L, monsterType->nameDescription);
	} else {
		monsterType->nameDescription = getString(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeHealth(lua_State* L)
{
	// get: monsterType:health() set: monsterType:health(health)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.health);
	} else {
		monsterType->info.health = getNumber<int32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeMaxHealth(lua_State* L)
{
	// get: monsterType:maxHealth() set: monsterType:maxHealth(health)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.healthMax);
	} else {
		monsterType->info.healthMax = getNumber<int32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeRunHealth(lua_State* L)
{
	// get: monsterType:runHealth() set: monsterType:runHealth(health)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.runAwayHealth);
	} else {
		monsterType->info.runAwayHealth = getNumber<int32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeExperience(lua_State* L)
{
	// get: monsterType:experience() set: monsterType:experience(exp)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.experience);
	} else {
		monsterType->info.experience = getNumber<uint64_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeSkull(lua_State* L)
{
	// get: monsterType:skull() set: monsterType:skull(str/constant)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.skull);
		return 1;
	}
	// The setter accepts either a numeric Skulls_t constant or a skull name.
	if (isNumber(L, 2)) {
		monsterType->info.skull = getNumber<Skulls_t>(L, 2);
	} else {
		monsterType->info.skull = getSkullType(getString(L, 2));
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterTypeCombatImmunities(lua_State* L)
{
	// get: monsterType:combatImmunities() set: monsterType:combatImmunities(immunity)
	// Getter pushes the raw damage-immunity bitmask; setter ORs in the flag
	// named by the string argument and pushes true, or warns and pushes nil
	// for an unknown name.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.damageImmunities);
		return 1;
	}

	// Name -> flag table replaces the former 40-line if/else chain.
	// "earth" is an alias for "poison", matching the original branches.
	struct ImmunityEntry { const char* name; CombatType_t flag; };
	static constexpr ImmunityEntry immunities[] = {
		{"physical", COMBAT_PHYSICALDAMAGE},
		{"energy", COMBAT_ENERGYDAMAGE},
		{"fire", COMBAT_FIREDAMAGE},
		{"poison", COMBAT_EARTHDAMAGE},
		{"earth", COMBAT_EARTHDAMAGE},
		{"drown", COMBAT_DROWNDAMAGE},
		{"ice", COMBAT_ICEDAMAGE},
		{"holy", COMBAT_HOLYDAMAGE},
		{"death", COMBAT_DEATHDAMAGE},
		{"lifedrain", COMBAT_LIFEDRAIN},
		{"manadrain", COMBAT_MANADRAIN},
	};

	std::string immunity = getString(L, 2);
	for (const ImmunityEntry& entry : immunities) {
		if (immunity == entry.name) {
			monsterType->info.damageImmunities |= entry.flag;
			pushBoolean(L, true);
			return 1;
		}
	}

	std::cout << "[Warning - Monsters::loadMonster] Unknown immunity name " << immunity << " for monster: " << monsterType->name << std::endl;
	lua_pushnil(L);
	return 1;
}
int LuaScriptInterface::luaMonsterTypeConditionImmunities(lua_State* L)
{
	// get: monsterType:conditionImmunities() set: monsterType:conditionImmunities(immunity)
	// Getter pushes the raw condition-immunity bitmask; setter ORs in the
	// flag named by the string argument and pushes true, or warns and pushes
	// nil for an unknown name.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.conditionImmunities);
		return 1;
	}

	// Name -> flag table replaces the former if/else chain. Aliases mirror
	// the original branches: "physical"/"bleed" -> BLEEDING, "earth" ->
	// POISON, "invisibility" -> INVISIBLE.
	struct ImmunityEntry { const char* name; ConditionType_t flag; };
	static constexpr ImmunityEntry immunities[] = {
		{"physical", CONDITION_BLEEDING},
		{"bleed", CONDITION_BLEEDING},
		{"energy", CONDITION_ENERGY},
		{"fire", CONDITION_FIRE},
		{"poison", CONDITION_POISON},
		{"earth", CONDITION_POISON},
		{"drown", CONDITION_DROWN},
		{"ice", CONDITION_FREEZING},
		{"holy", CONDITION_DAZZLED},
		{"death", CONDITION_CURSED},
		{"paralyze", CONDITION_PARALYZE},
		{"outfit", CONDITION_OUTFIT},
		{"drunk", CONDITION_DRUNK},
		{"invisible", CONDITION_INVISIBLE},
		{"invisibility", CONDITION_INVISIBLE},
	};

	std::string immunity = getString(L, 2);
	for (const ImmunityEntry& entry : immunities) {
		if (immunity == entry.name) {
			monsterType->info.conditionImmunities |= entry.flag;
			pushBoolean(L, true);
			return 1;
		}
	}

	std::cout << "[Warning - Monsters::loadMonster] Unknown immunity name " << immunity << " for monster: " << monsterType->name << std::endl;
	lua_pushnil(L);
	return 1;
}
int LuaScriptInterface::luaMonsterTypeGetAttackList(lua_State* L)
{
	// monsterType:getAttackList()
	// Returns an array of tables, one per attack spell, each holding the
	// spell block's tuning fields plus the spell object itself.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	lua_createtable(L, monsterType->info.attackSpells.size(), 0);

	int index = 0;
	for (const auto& spellBlock : monsterType->info.attackSpells) {
		// 8 hash slots: 7 scalar fields plus the "spell" userdata below.
		lua_createtable(L, 0, 8);

		setField(L, "chance", spellBlock.chance);
		setField(L, "isCombatSpell", spellBlock.combatSpell ? 1 : 0);
		setField(L, "isMelee", spellBlock.isMelee ? 1 : 0);
		setField(L, "minCombatValue", spellBlock.minCombatValue);
		setField(L, "maxCombatValue", spellBlock.maxCombatValue);
		setField(L, "range", spellBlock.range);
		setField(L, "speed", spellBlock.speed);
		// NOTE(review): spellBlock.spell is downcast to CombatSpell*
		// unconditionally — presumably attack spell blocks always hold
		// combat spells; confirm against the deserializer.
		pushUserdata<CombatSpell>(L, static_cast<CombatSpell*>(spellBlock.spell));
		lua_setfield(L, -2, "spell");

		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeAddAttack(lua_State* L)
{
	// monsterType:addAttack(monsterspell)
	// Deserializes the monster spell and appends it to the attack list.
	// Pushes true on success, false when deserialization fails (after
	// logging a warning), and nil for invalid arguments.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 2);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}

	spellBlock_t sb;
	if (g_monsters.deserializeSpell(spell, sb, monsterType->name)) {
		monsterType->info.attackSpells.push_back(std::move(sb));
		// BUG fix: the success path previously returned 1 result without
		// pushing anything, handing the caller an unrelated stack slot.
		pushBoolean(L, true);
	} else {
		std::cout << monsterType->name << std::endl;
		std::cout << "[Warning - Monsters::loadMonster] Cant load spell. " << spell->name << std::endl;
		pushBoolean(L, false);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeGetDefenseList(lua_State* L)
{
	// monsterType:getDefenseList()
	// Returns an array of tables, one per defense spell, each holding the
	// spell block's tuning fields plus the spell object itself.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	lua_createtable(L, monsterType->info.defenseSpells.size(), 0);

	int index = 0;
	for (const auto& spellBlock : monsterType->info.defenseSpells) {
		// 8 hash slots: 7 scalar fields plus the "spell" userdata below.
		lua_createtable(L, 0, 8);

		setField(L, "chance", spellBlock.chance);
		setField(L, "isCombatSpell", spellBlock.combatSpell ? 1 : 0);
		setField(L, "isMelee", spellBlock.isMelee ? 1 : 0);
		setField(L, "minCombatValue", spellBlock.minCombatValue);
		setField(L, "maxCombatValue", spellBlock.maxCombatValue);
		setField(L, "range", spellBlock.range);
		setField(L, "speed", spellBlock.speed);
		// NOTE(review): unconditional downcast to CombatSpell*, same caveat
		// as getAttackList — confirm against the deserializer.
		pushUserdata<CombatSpell>(L, static_cast<CombatSpell*>(spellBlock.spell));
		lua_setfield(L, -2, "spell");

		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeAddDefense(lua_State* L)
{
	// monsterType:addDefense(monsterspell)
	// Deserializes the monster spell and appends it to the defense list.
	// Pushes true on success, false when deserialization fails (after
	// logging a warning), and nil for invalid arguments.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 2);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}

	spellBlock_t sb;
	if (g_monsters.deserializeSpell(spell, sb, monsterType->name)) {
		monsterType->info.defenseSpells.push_back(std::move(sb));
		// BUG fix: the success path previously returned 1 result without
		// pushing anything, handing the caller an unrelated stack slot.
		pushBoolean(L, true);
	} else {
		std::cout << monsterType->name << std::endl;
		std::cout << "[Warning - Monsters::loadMonster] Cant load spell. " << spell->name << std::endl;
		pushBoolean(L, false);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeGetElementList(lua_State* L)
{
	// monsterType:getElementList()
	// Returns a table keyed by combat type holding each element entry.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	lua_createtable(L, monsterType->info.elementMap.size(), 0);
	for (const auto& entry : monsterType->info.elementMap) {
		lua_pushnumber(L, entry.second);
		lua_rawseti(L, -2, entry.first);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeAddElement(lua_State* L)
{
	// monsterType:addElement(type, percent)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	CombatType_t element = getNumber<CombatType_t>(L, 2);
	monsterType->info.elementMap[element] = getNumber<int32_t>(L, 3);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterTypeGetVoices(lua_State* L)
{
	// monsterType:getVoices()
	// Returns an array of {text, yellText} tables, one per voice entry.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	lua_createtable(L, monsterType->info.voiceVector.size(), 0);
	int index = 0;
	for (const auto& voiceBlock : monsterType->info.voiceVector) {
		lua_createtable(L, 0, 2);
		setField(L, "text", voiceBlock.text);
		setField(L, "yellText", voiceBlock.yellText);
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeAddVoice(lua_State* L)
{
	// monsterType:addVoice(sentence, interval, chance, yell)
	// Appends a voice line to the monster type.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (monsterType) {
		voiceBlock_t voice;
		voice.text = getString(L, 2);
		// NOTE(review): interval and chance are stored on the monster type
		// itself, not per voice entry — every addVoice call overwrites the
		// monster-wide yellSpeedTicks/yellChance. Confirm this is intended.
		monsterType->info.yellSpeedTicks = getNumber<uint32_t>(L, 3);
		monsterType->info.yellChance = getNumber<uint32_t>(L, 4);
		voice.yellText = getBoolean(L, 5);
		monsterType->info.voiceVector.push_back(voice);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeGetLoot(lua_State* L)
{
	// monsterType:getLoot()
	// Pushes the monster's loot table, or nil for an invalid userdata.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	pushLoot(L, monsterType->info.lootItems);
	return 1;
}
int LuaScriptInterface::luaMonsterTypeAddLoot(lua_State* L)
{
	// monsterType:addLoot(loot)
	// Loads the loot block into the monster type; nil on bad arguments.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	Loot* loot = getUserdata<Loot>(L, 2);
	if (!loot) {
		lua_pushnil(L);
		return 1;
	}
	monsterType->loadLoot(monsterType, loot->lootBlock);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterTypeGetCreatureEvents(lua_State* L)
{
	// monsterType:getCreatureEvents()
	// Returns an array of the registered creature-event script names.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	lua_createtable(L, monsterType->info.scripts.size(), 0);
	int index = 0;
	for (const std::string& eventName : monsterType->info.scripts) {
		pushString(L, eventName);
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeRegisterEvent(lua_State* L)
{
	// monsterType:registerEvent(name)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	monsterType->info.scripts.push_back(getString(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterTypeEventOnCallback(lua_State* L)
{
	// monsterType:onThink(callback)
	// monsterType:onAppear(callback)
	// monsterType:onDisappear(callback)
	// monsterType:onMove(callback)
	// monsterType:onSay(callback)
	// Registers a Lua callback on the monster type. The callback argument is
	// not read here explicitly — presumably loadCallback picks it up from the
	// Lua stack itself; confirm against its implementation.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (monsterType) {
		if (monsterType->loadCallback(&g_scripts->getScriptInterface())) {
			pushBoolean(L, true);
			return 1;
		}
		pushBoolean(L, false);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeEventType(lua_State* L)
{
	// monstertype:eventType(event)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	monsterType->info.eventType = getNumber<MonstersEvent_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterTypeGetSummonList(lua_State* L)
{
	// monsterType:getSummonList()
	// Returns an array of {name, speed, chance} tables, one per summon.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	lua_createtable(L, monsterType->info.summons.size(), 0);
	int index = 0;
	for (const auto& summonBlock : monsterType->info.summons) {
		lua_createtable(L, 0, 3);
		setField(L, "name", summonBlock.name);
		setField(L, "speed", summonBlock.speed);
		setField(L, "chance", summonBlock.chance);
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeAddSummon(lua_State* L)
{
	// monsterType:addSummon(name, interval, chance)
	// Appends a summon entry to the monster type.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (monsterType) {
		summonBlock_t summon;
		summon.name = getString(L, 2);
		// NOTE(review): the doc comment says (name, interval, chance), but
		// argument 3 is stored as `chance` and argument 4 as `speed` — one
		// of the two is wrong. Confirm the intended order against callers
		// before changing either the comment or the assignments.
		summon.chance = getNumber<int32_t>(L, 3);
		summon.speed = getNumber<int32_t>(L, 4);
		monsterType->info.summons.push_back(summon);
		pushBoolean(L, true);
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeMaxSummons(lua_State* L)
{
	// get: monsterType:maxSummons() set: monsterType:maxSummons(ammount)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.maxSummons);
	} else {
		monsterType->info.maxSummons = getNumber<uint32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeArmor(lua_State* L)
{
	// get: monsterType:armor() set: monsterType:armor(armor)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.armor);
	} else {
		monsterType->info.armor = getNumber<int32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeDefense(lua_State* L)
{
	// get: monsterType:defense() set: monsterType:defense(defense)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.defense);
	} else {
		monsterType->info.defense = getNumber<int32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeOutfit(lua_State* L)
{
	// get: monsterType:outfit() set: monsterType:outfit(outfit)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushOutfit(L, monsterType->info.outfit);
	} else {
		monsterType->info.outfit = getOutfit(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeRace(lua_State* L)
{
	// get: monsterType:race() set: monsterType:race(race)
	// Getter pushes the numeric race constant; setter maps a race name to
	// its RACE_* constant, warning and pushing nil for an unknown name.
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.race);
		return 1;
	}

	// BUG fix: the race string was previously fetched before the argument
	// count was checked, so the getter form read a missing stack index.
	std::string race = getString(L, 2);
	if (race == "venom") {
		monsterType->info.race = RACE_VENOM;
	} else if (race == "blood") {
		monsterType->info.race = RACE_BLOOD;
	} else if (race == "undead") {
		monsterType->info.race = RACE_UNDEAD;
	} else if (race == "fire") {
		monsterType->info.race = RACE_FIRE;
	} else if (race == "energy") {
		monsterType->info.race = RACE_ENERGY;
	} else {
		std::cout << "[Warning - Monsters::loadMonster] Unknown race type " << race << "." << std::endl;
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterTypeCorpseId(lua_State* L)
{
	// get: monsterType:corpseId() set: monsterType:corpseId(id)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (monsterType) {
		if (lua_gettop(L) == 1) {
			lua_pushnumber(L, monsterType->info.lookcorpse);
		} else {
			monsterType->info.lookcorpse = getNumber<uint16_t>(L, 2);
			// Consistency fix: use the pushBoolean helper like every other
			// accessor instead of calling lua_pushboolean directly.
			pushBoolean(L, true);
		}
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeManaCost(lua_State* L)
{
	// get: monsterType:manaCost() set: monsterType:manaCost(mana)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.manaCost);
	} else {
		monsterType->info.manaCost = getNumber<uint32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}

int LuaScriptInterface::luaMonsterTypeBaseSpeed(lua_State* L)
{
	// get: monsterType:baseSpeed() set: monsterType:baseSpeed(speed)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, monsterType->info.baseSpeed);
	} else {
		monsterType->info.baseSpeed = getNumber<uint32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeLight(lua_State* L)
{
	// get: monsterType:light() set: monsterType:light(color, level)
	MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
	if (!monsterType) {
		lua_pushnil(L);
		return 1;
	}

	if (lua_gettop(L) == 1) {
		// The getter returns two values: level first, then color.
		lua_pushnumber(L, monsterType->info.light.level);
		lua_pushnumber(L, monsterType->info.light.color);
		return 2;
	}

	monsterType->info.light.color = getNumber<uint8_t>(L, 2);
	monsterType->info.light.level = getNumber<uint8_t>(L, 3);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterTypeStaticAttackChance(lua_State* L)
{
// get: monsterType:staticAttackChance() set: monsterType:staticAttackChance(chance)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.staticAttackChance);
} else {
monsterType->info.staticAttackChance = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeTargetDistance(lua_State* L)
{
// get: monsterType:targetDistance() set: monsterType:targetDistance(distance)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.targetDistance);
} else {
monsterType->info.targetDistance = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeYellChance(lua_State* L)
{
// get: monsterType:yellChance() set: monsterType:yellChance(chance)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.yellChance);
} else {
monsterType->info.yellChance = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeYellSpeedTicks(lua_State* L)
{
	// get: monsterType:yellSpeedTicks() set: monsterType:yellSpeedTicks(rate)
	MonsterType* mType = getUserdata<MonsterType>(L, 1);
	if (!mType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, mType->info.yellSpeedTicks);
	} else {
		mType->info.yellSpeedTicks = getNumber<uint32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeChangeTargetChance(lua_State* L)
{
	// get: monsterType:changeTargetChance() set: monsterType:changeTargetChance(chance)
	MonsterType* mType = getUserdata<MonsterType>(L, 1);
	if (!mType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, mType->info.changeTargetChance);
	} else {
		mType->info.changeTargetChance = getNumber<int32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaMonsterTypeChangeTargetSpeed(lua_State* L)
{
	// get: monsterType:changeTargetSpeed() set: monsterType:changeTargetSpeed(speed)
	MonsterType* mType = getUserdata<MonsterType>(L, 1);
	if (!mType) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, mType->info.changeTargetSpeed);
	} else {
		mType->info.changeTargetSpeed = getNumber<uint32_t>(L, 2);
		pushBoolean(L, true);
	}
	return 1;
}
// Loot
int LuaScriptInterface::luaCreateLoot(lua_State* L)
{
	// Loot() will create a new loot item
	// NOTE: operator new never returns nullptr (it throws std::bad_alloc on
	// failure), so the former `if (loot)` / `else lua_pushnil` was dead code.
	Loot* loot = new Loot();
	pushUserdata<Loot>(L, loot);
	setMetatable(L, -1, "Loot");
	return 1;
}
int LuaScriptInterface::luaDeleteLoot(lua_State* L)
{
	// loot:delete() loot:__gc()
	// Frees the Loot object and nulls the userdata pointer so a double
	// delete (explicit delete followed by __gc) is harmless.
	Loot** lootPtr = getRawUserdata<Loot>(L, 1);
	if (!lootPtr || !*lootPtr) {
		return 0;
	}
	delete *lootPtr;
	*lootPtr = nullptr;
	return 0;
}
int LuaScriptInterface::luaLootSetId(lua_State* L)
{
	// loot:setId(id or name)
	Loot* loot = getUserdata<Loot>(L, 1);
	if (!loot) {
		lua_pushnil(L);
		return 1;
	}
	if (isNumber(L, 2)) {
		loot->lootBlock.id = getNumber<uint16_t>(L, 2);
		pushBoolean(L, true);
		return 1;
	}
	// string argument: resolve the item name, which must exist and be unique
	auto name = getString(L, 2);
	auto ids = Item::items.nameToItems.equal_range(asLowerCaseString(name));
	if (ids.first == Item::items.nameToItems.cend()) {
		std::cout << "[Warning - Loot:setId] Unknown loot item \"" << name << "\". " << std::endl;
		pushBoolean(L, false);
		return 1;
	}
	if (std::next(ids.first) != ids.second) {
		std::cout << "[Warning - Loot:setId] Non-unique loot item \"" << name << "\". " << std::endl;
		pushBoolean(L, false);
		return 1;
	}
	loot->lootBlock.id = ids.first->second;
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaLootSetSubType(lua_State* L)
{
	// loot:setSubType(type)
	Loot* loot = getUserdata<Loot>(L, 1);
	if (!loot) {
		lua_pushnil(L);
		return 1;
	}
	loot->lootBlock.subType = getNumber<uint16_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaLootSetChance(lua_State* L)
{
	// loot:setChance(chance)
	Loot* loot = getUserdata<Loot>(L, 1);
	if (!loot) {
		lua_pushnil(L);
		return 1;
	}
	loot->lootBlock.chance = getNumber<uint32_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaLootSetMaxCount(lua_State* L)
{
	// loot:setMaxCount(max)
	Loot* loot = getUserdata<Loot>(L, 1);
	if (!loot) {
		lua_pushnil(L);
		return 1;
	}
	loot->lootBlock.countmax = getNumber<uint32_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaLootSetActionId(lua_State* L)
{
	// loot:setActionId(actionid)
	Loot* loot = getUserdata<Loot>(L, 1);
	if (!loot) {
		lua_pushnil(L);
		return 1;
	}
	loot->lootBlock.actionId = getNumber<uint32_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaLootSetDescription(lua_State* L)
{
	// loot:setDescription(desc)
	Loot* loot = getUserdata<Loot>(L, 1);
	if (!loot) {
		lua_pushnil(L);
		return 1;
	}
	loot->lootBlock.text = getString(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaLootAddChildLoot(lua_State* L)
{
	// loot:addChildLoot(loot)
	Loot* loot = getUserdata<Loot>(L, 1);
	if (!loot) {
		lua_pushnil(L);
		return 1;
	}
	Loot* childLoot = getUserdata<Loot>(L, 2);
	if (!childLoot) {
		// BUG FIX: the original dereferenced getUserdata(L, 2) without a null
		// check, crashing on a non-Loot argument
		pushBoolean(L, false);
		return 1;
	}
	loot->lootBlock.childLoot.push_back(childLoot->lootBlock);
	// BUG FIX: the original returned 1 without pushing anything on success,
	// exposing an undefined stack slot to the Lua caller
	pushBoolean(L, true);
	return 1;
}
// MonsterSpell
int LuaScriptInterface::luaCreateMonsterSpell(lua_State* L)
{
	// MonsterSpell() will create a new Monster Spell
	// NOTE: operator new never returns nullptr (it throws std::bad_alloc on
	// failure), so the former `if (spell)` / `else lua_pushnil` was dead code.
	MonsterSpell* spell = new MonsterSpell();
	pushUserdata<MonsterSpell>(L, spell);
	setMetatable(L, -1, "MonsterSpell");
	return 1;
}
int LuaScriptInterface::luaDeleteMonsterSpell(lua_State* L)
{
	// monsterSpell:delete() monsterSpell:__gc()
	// Frees the MonsterSpell and nulls the userdata pointer so a later __gc
	// call on the same userdata is a no-op.
	MonsterSpell** spellPtr = getRawUserdata<MonsterSpell>(L, 1);
	if (!spellPtr || !*spellPtr) {
		return 0;
	}
	delete *spellPtr;
	*spellPtr = nullptr;
	return 0;
}
int LuaScriptInterface::luaMonsterSpellSetType(lua_State* L)
{
	// monsterSpell:setType(type)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->name = getString(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetScriptName(lua_State* L)
{
	// monsterSpell:setScriptName(name)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->scriptName = getString(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetChance(lua_State* L)
{
	// monsterSpell:setChance(chance)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->chance = getNumber<uint8_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetInterval(lua_State* L)
{
	// monsterSpell:setInterval(interval)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->interval = getNumber<uint16_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetRange(lua_State* L)
{
	// monsterSpell:setRange(range)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->range = getNumber<uint8_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatValue(lua_State* L)
{
	// monsterSpell:setCombatValue(min, max)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->minCombatValue = getNumber<int32_t>(L, 2);
	spell->maxCombatValue = getNumber<int32_t>(L, 3);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatType(lua_State* L)
{
	// monsterSpell:setCombatType(combatType_t)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->combatType = getNumber<CombatType_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetAttackValue(lua_State* L)
{
	// monsterSpell:setAttackValue(attack, skill)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->attack = getNumber<int32_t>(L, 2);
	spell->skill = getNumber<int32_t>(L, 3);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetNeedTarget(lua_State* L)
{
	// monsterSpell:setNeedTarget(bool)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->needTarget = getBoolean(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetNeedDirection(lua_State* L)
{
	// monsterSpell:setNeedDirection(bool)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->needDirection = getBoolean(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatLength(lua_State* L)
{
	// monsterSpell:setCombatLength(length)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->length = getNumber<int32_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatSpread(lua_State* L)
{
	// monsterSpell:setCombatSpread(spread)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->spread = getNumber<int32_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatRadius(lua_State* L)
{
	// monsterSpell:setCombatRadius(radius)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->radius = getNumber<int32_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionType(lua_State* L)
{
	// monsterSpell:setConditionType(type)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->conditionType = getNumber<ConditionType_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionDamage(lua_State* L)
{
	// monsterSpell:setConditionDamage(min, max, start)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->conditionMinDamage = getNumber<int32_t>(L, 2);
	spell->conditionMaxDamage = getNumber<int32_t>(L, 3);
	spell->conditionStartDamage = getNumber<int32_t>(L, 4);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionSpeedChange(lua_State* L)
{
	// monsterSpell:setConditionSpeedChange(minSpeed[, maxSpeed])
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->minSpeedChange = getNumber<int32_t>(L, 2);
	// maxSpeed defaults to 0 when omitted
	spell->maxSpeedChange = getNumber<int32_t>(L, 3, 0);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionDuration(lua_State* L)
{
	// monsterSpell:setConditionDuration(duration)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->duration = getNumber<int32_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionDrunkenness(lua_State* L)
{
	// monsterSpell:setConditionDrunkenness(drunkenness)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->drunkenness = getNumber<uint8_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionTickInterval(lua_State* L)
{
	// monsterSpell:setConditionTickInterval(interval)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->tickInterval = getNumber<int32_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatShootEffect(lua_State* L)
{
	// monsterSpell:setCombatShootEffect(effect)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->shoot = getNumber<ShootType_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatEffect(lua_State* L)
{
	// monsterSpell:setCombatEffect(effect)
	MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	spell->effect = getNumber<MagicEffectClasses>(L, 2);
	pushBoolean(L, true);
	return 1;
}
// Party
// NOTE(review): declared int32_t while every other binding here returns int —
// presumably matches the header declaration; confirm before normalizing.
int32_t LuaScriptInterface::luaPartyCreate(lua_State* L)
{
	// Party(userdata)
	// Creates a new party led by the given player, but only if the player is
	// not already in one; otherwise returns nil.
	Player* player = getUserdata<Player>(L, 2);
	if (!player) {
		lua_pushnil(L);
		return 1;
	}
	Party* party = player->getParty();
	if (!party) {
		// NOTE(review): the raw `new Party(player)` appears to be owned by the
		// player/game afterwards (shield update + skull broadcast follow) —
		// confirm ownership before restructuring; no delete happens here.
		party = new Party(player);
		g_game.updatePlayerShield(player);
		player->sendCreatureSkull(player);
		pushUserdata<Party>(L, party);
		setMetatable(L, -1, "Party");
	} else {
		// player already belongs to a party
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaPartyDisband(lua_State* L)
{
	// party:disband()
	// Disbands the party and nulls the userdata pointer so further method
	// calls on this userdata see an invalid party.
	Party** partyPtr = getRawUserdata<Party>(L, 1);
	if (!partyPtr || !*partyPtr) {
		lua_pushnil(L);
		return 1;
	}
	(*partyPtr)->disband();
	*partyPtr = nullptr;
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaPartyGetLeader(lua_State* L)
{
	// party:getLeader()
	Party* party = getUserdata<Party>(L, 1);
	if (!party) {
		lua_pushnil(L);
		return 1;
	}
	Player* leader = party->getLeader();
	if (!leader) {
		lua_pushnil(L);
		return 1;
	}
	pushUserdata<Player>(L, leader);
	setMetatable(L, -1, "Player");
	return 1;
}
int LuaScriptInterface::luaPartySetLeader(lua_State* L)
{
	// party:setLeader(player)
	Party* party = getUserdata<Party>(L, 1);
	Player* player = getPlayer(L, 2);
	if (!party || !player) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, party->passPartyLeadership(player));
	return 1;
}
int LuaScriptInterface::luaPartyGetMembers(lua_State* L)
{
	// party:getMembers()
	// Returns an array table of Player userdata (excludes the leader).
	Party* party = getUserdata<Party>(L, 1);
	if (!party) {
		lua_pushnil(L);
		return 1;
	}
	lua_createtable(L, party->getMemberCount(), 0);
	int index = 0;
	for (Player* member : party->getMembers()) {
		pushUserdata<Player>(L, member);
		setMetatable(L, -1, "Player");
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
int LuaScriptInterface::luaPartyGetMemberCount(lua_State* L)
{
	// party:getMemberCount()
	Party* party = getUserdata<Party>(L, 1);
	if (!party) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, party->getMemberCount());
	return 1;
}
int LuaScriptInterface::luaPartyGetInvitees(lua_State* L)
{
	// party:getInvitees()
	// Returns an array table of Player userdata for pending invitations.
	Party* party = getUserdata<Party>(L, 1);
	if (!party) {
		lua_pushnil(L);
		return 1;
	}
	lua_createtable(L, party->getInvitationCount(), 0);
	int index = 0;
	for (Player* invitee : party->getInvitees()) {
		pushUserdata<Player>(L, invitee);
		setMetatable(L, -1, "Player");
		lua_rawseti(L, -2, ++index);
	}
	return 1;
}
int LuaScriptInterface::luaPartyGetInviteeCount(lua_State* L)
{
	// party:getInviteeCount()
	Party* party = getUserdata<Party>(L, 1);
	if (!party) {
		lua_pushnil(L);
		return 1;
	}
	lua_pushnumber(L, party->getInvitationCount());
	return 1;
}
int LuaScriptInterface::luaPartyAddInvite(lua_State* L)
{
	// party:addInvite(player)
	Party* party = getUserdata<Party>(L, 1);
	Player* player = getPlayer(L, 2);
	if (!party || !player) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, party->invitePlayer(*player));
	return 1;
}
int LuaScriptInterface::luaPartyRemoveInvite(lua_State* L)
{
	// party:removeInvite(player)
	Party* party = getUserdata<Party>(L, 1);
	Player* player = getPlayer(L, 2);
	if (!party || !player) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, party->removeInvite(*player));
	return 1;
}
int LuaScriptInterface::luaPartyAddMember(lua_State* L)
{
	// party:addMember(player)
	Party* party = getUserdata<Party>(L, 1);
	Player* player = getPlayer(L, 2);
	if (!party || !player) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, party->joinParty(*player));
	return 1;
}
int LuaScriptInterface::luaPartyRemoveMember(lua_State* L)
{
	// party:removeMember(player)
	Party* party = getUserdata<Party>(L, 1);
	Player* player = getPlayer(L, 2);
	if (!party || !player) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, party->leaveParty(player));
	return 1;
}
int LuaScriptInterface::luaPartyIsSharedExperienceActive(lua_State* L)
{
	// party:isSharedExperienceActive()
	Party* party = getUserdata<Party>(L, 1);
	if (!party) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, party->isSharedExperienceActive());
	return 1;
}
int LuaScriptInterface::luaPartyIsSharedExperienceEnabled(lua_State* L)
{
	// party:isSharedExperienceEnabled()
	Party* party = getUserdata<Party>(L, 1);
	if (!party) {
		lua_pushnil(L);
		return 1;
	}
	pushBoolean(L, party->isSharedExperienceEnabled());
	return 1;
}
int LuaScriptInterface::luaPartyShareExperience(lua_State* L)
{
	// party:shareExperience(experience)
	Party* party = getUserdata<Party>(L, 1);
	if (!party) {
		lua_pushnil(L);
		return 1;
	}
	party->shareExperience(getNumber<uint64_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaPartySetSharedExperience(lua_State* L)
{
	// party:setSharedExperience(active)
	Party* party = getUserdata<Party>(L, 1);
	if (!party) {
		lua_pushnil(L);
		return 1;
	}
	bool active = getBoolean(L, 2);
	pushBoolean(L, party->setSharedExperience(party->getLeader(), active));
	return 1;
}
// Spells
int LuaScriptInterface::luaSpellCreate(lua_State* L)
{
	// Spell(words, name or id) to get an existing spell
	// Spell(type) ex: Spell(SPELL_INSTANT) or Spell(SPELL_RUNE) to create a new spell
	//
	// Lookup order: numeric arg -> rune spell by id, else treated as a
	// SpellType_t; string arg -> instant by name, instant by words, rune by
	// name, else the literal strings "instant"/"rune" select a new spell type.
	if (lua_gettop(L) == 1) {
		std::cout << "[Error - Spell::luaSpellCreate] There is no parameter set!" << std::endl;
		lua_pushnil(L);
		return 1;
	}
	SpellType_t spellType = SPELL_UNDEFINED;
	if (isNumber(L, 2)) {
		int32_t id = getNumber<int32_t>(L, 2);
		// a number that matches a rune spell id returns that existing spell
		RuneSpell* rune = g_spells->getRuneSpell(id);
		if (rune) {
			pushUserdata<Spell>(L, rune);
			setMetatable(L, -1, "Spell");
			return 1;
		}
		// otherwise interpret the number as a SpellType_t constant
		spellType = static_cast<SpellType_t>(id);
	} else if (isString(L, 2)) {
		std::string arg = getString(L, 2);
		InstantSpell* instant = g_spells->getInstantSpellByName(arg);
		if (instant) {
			pushUserdata<Spell>(L, instant);
			setMetatable(L, -1, "Spell");
			return 1;
		}
		// not found by name: try matching the spell's words
		instant = g_spells->getInstantSpell(arg);
		if (instant) {
			pushUserdata<Spell>(L, instant);
			setMetatable(L, -1, "Spell");
			return 1;
		}
		RuneSpell* rune = g_spells->getRuneSpellByName(arg);
		if (rune) {
			pushUserdata<Spell>(L, rune);
			setMetatable(L, -1, "Spell");
			return 1;
		}
		// no existing spell matched: "instant"/"rune" request a new spell
		std::string tmp = asLowerCaseString(arg);
		if (tmp == "instant") {
			spellType = SPELL_INSTANT;
		} else if (tmp == "rune") {
			spellType = SPELL_RUNE;
		}
	}
	// create a fresh Lua-defined spell of the requested type; ownership is
	// presumably transferred on spell:register() — confirm before changing
	if (spellType == SPELL_INSTANT) {
		InstantSpell* spell = new InstantSpell(getScriptEnv()->getScriptInterface());
		spell->fromLua = true;
		pushUserdata<Spell>(L, spell);
		setMetatable(L, -1, "Spell");
		spell->spellType = SPELL_INSTANT;
		return 1;
	} else if (spellType == SPELL_RUNE) {
		RuneSpell* spell = new RuneSpell(getScriptEnv()->getScriptInterface());
		spell->fromLua = true;
		pushUserdata<Spell>(L, spell);
		setMetatable(L, -1, "Spell");
		spell->spellType = SPELL_RUNE;
		return 1;
	}
	// unknown argument: nil
	lua_pushnil(L);
	return 1;
}
int LuaScriptInterface::luaSpellOnCastSpell(lua_State* L)
{
	// spell:onCastSpell(callback)
	// Loads the Lua callback for a Lua-defined spell and marks it scripted.
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (spell->spellType == SPELL_INSTANT) {
		InstantSpell* instant = dynamic_cast<InstantSpell*>(spell);
		if (!instant->loadCallback()) {
			pushBoolean(L, false);
			return 1;
		}
		instant->scripted = true;
		pushBoolean(L, true);
	} else if (spell->spellType == SPELL_RUNE) {
		RuneSpell* rune = dynamic_cast<RuneSpell*>(spell);
		if (!rune->loadCallback()) {
			pushBoolean(L, false);
			return 1;
		}
		rune->scripted = true;
		pushBoolean(L, true);
	} else {
		// BUG FIX: the original returned 1 without pushing a value when
		// spellType was neither instant nor rune, exposing an undefined
		// stack slot to the Lua caller
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaSpellRegister(lua_State* L)
{
	// spell:register()
	// Registers a scripted spell with the spell manager; rune spells also
	// propagate their requirements into the rune item's ItemType.
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (spell->spellType == SPELL_INSTANT) {
		InstantSpell* instant = dynamic_cast<InstantSpell*>(spell);
		if (!instant->isScripted()) {
			pushBoolean(L, false);
			return 1;
		}
		pushBoolean(L, g_spells->registerInstantLuaEvent(instant));
	} else if (spell->spellType == SPELL_RUNE) {
		RuneSpell* rune = dynamic_cast<RuneSpell*>(spell);
		if (rune->getMagicLevel() != 0 || rune->getLevel() != 0) {
			//Change information in the ItemType to get accurate description
			ItemType& iType = Item::items.getItemType(rune->getRuneItemId());
			iType.name = rune->getName();
			iType.runeMagLevel = rune->getMagicLevel();
			iType.runeLevel = rune->getLevel();
			iType.charges = rune->getCharges();
		}
		if (!rune->isScripted()) {
			pushBoolean(L, false);
			return 1;
		}
		pushBoolean(L, g_spells->registerRuneLuaEvent(rune));
	} else {
		// BUG FIX: the original returned 1 without pushing a value for any
		// other spell type, exposing an undefined stack slot to the caller
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaSpellName(lua_State* L)
{
	// spell:name(name)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushString(L, spell->getName());
	} else {
		spell->setName(getString(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellId(lua_State* L)
{
	// spell:id(id)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, spell->getId());
	} else {
		spell->setId(getNumber<uint8_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellGroup(lua_State* L)
{
	// spell:group(primaryGroup[, secondaryGroup])
	// Getter (no args): returns primary and secondary group.
	// Setter: each group may be given as a SPELLGROUP_ number or a name string.
	Spell* spell = getUserdata<Spell>(L, 1);
	if (spell) {
		if (lua_gettop(L) == 1) {
			lua_pushnumber(L, spell->getGroup());
			lua_pushnumber(L, spell->getSecondaryGroup());
			return 2;
		} else if (lua_gettop(L) == 2) {
			SpellGroup_t group = getNumber<SpellGroup_t>(L, 2);
			if (group) {
				spell->setGroup(group);
				pushBoolean(L, true);
			} else if (isString(L, 2)) {
				group = stringToSpellGroup(getString(L, 2));
				if (group != SPELLGROUP_NONE) {
					spell->setGroup(group);
				} else {
					std::cout << "[Warning - Spell::group] Unknown group: " << getString(L, 2) << std::endl;
					pushBoolean(L, false);
					return 1;
				}
				pushBoolean(L, true);
			} else {
				std::cout << "[Warning - Spell::group] Unknown group: " << getString(L, 2) << std::endl;
				pushBoolean(L, false);
				return 1;
			}
		} else {
			SpellGroup_t primaryGroup = getNumber<SpellGroup_t>(L, 2);
			// BUG FIX: secondaryGroup was read from stack slot 2 (the primary
			// group) instead of slot 3, so a numeric secondary group argument
			// was silently ignored
			SpellGroup_t secondaryGroup = getNumber<SpellGroup_t>(L, 3);
			if (primaryGroup && secondaryGroup) {
				spell->setGroup(primaryGroup);
				spell->setSecondaryGroup(secondaryGroup);
				pushBoolean(L, true);
			} else if (isString(L, 2) && isString(L, 3)) {
				primaryGroup = stringToSpellGroup(getString(L, 2));
				if (primaryGroup != SPELLGROUP_NONE) {
					spell->setGroup(primaryGroup);
				} else {
					std::cout << "[Warning - Spell::group] Unknown primaryGroup: " << getString(L, 2) << std::endl;
					pushBoolean(L, false);
					return 1;
				}
				secondaryGroup = stringToSpellGroup(getString(L, 3));
				if (secondaryGroup != SPELLGROUP_NONE) {
					spell->setSecondaryGroup(secondaryGroup);
				} else {
					std::cout << "[Warning - Spell::group] Unknown secondaryGroup: " << getString(L, 3) << std::endl;
					pushBoolean(L, false);
					return 1;
				}
				pushBoolean(L, true);
			} else {
				std::cout << "[Warning - Spell::group] Unknown primaryGroup: " << getString(L, 2) << " or secondaryGroup: " << getString(L, 3) << std::endl;
				pushBoolean(L, false);
				return 1;
			}
		}
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaSpellCooldown(lua_State* L)
{
	// spell:cooldown(cooldown)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, spell->getCooldown());
	} else {
		spell->setCooldown(getNumber<uint32_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellGroupCooldown(lua_State* L)
{
	// spell:groupCooldown(primaryGroupCd[, secondaryGroupCd])
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	int args = lua_gettop(L);
	if (args == 1) {
		// getter: returns primary and secondary cooldown
		lua_pushnumber(L, spell->getGroupCooldown());
		lua_pushnumber(L, spell->getSecondaryCooldown());
		return 2;
	}
	spell->setGroupCooldown(getNumber<uint32_t>(L, 2));
	if (args > 2) {
		spell->setSecondaryCooldown(getNumber<uint32_t>(L, 3));
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaSpellLevel(lua_State* L)
{
	// spell:level(lvl)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, spell->getLevel());
	} else {
		spell->setLevel(getNumber<uint32_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellMagicLevel(lua_State* L)
{
	// spell:magicLevel(lvl)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, spell->getMagicLevel());
	} else {
		spell->setMagicLevel(getNumber<uint32_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellMana(lua_State* L)
{
	// spell:mana(mana)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, spell->getMana());
	} else {
		spell->setMana(getNumber<uint32_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellManaPercent(lua_State* L)
{
	// spell:manaPercent(percent)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, spell->getManaPercent());
	} else {
		spell->setManaPercent(getNumber<uint32_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellSoul(lua_State* L)
{
	// spell:soul(soul)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, spell->getSoulCost());
	} else {
		spell->setSoulCost(getNumber<uint32_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellRange(lua_State* L)
{
	// spell:range(range)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, spell->getRange());
	} else {
		spell->setRange(getNumber<int32_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellPremium(lua_State* L)
{
	// spell:isPremium(bool)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->isPremium());
	} else {
		spell->setPremium(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellEnabled(lua_State* L)
{
	// spell:isEnabled(bool)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->isEnabled());
	} else {
		spell->setEnabled(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellNeedTarget(lua_State* L)
{
	// spell:needTarget(bool)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getNeedTarget());
	} else {
		spell->setNeedTarget(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellNeedWeapon(lua_State* L)
{
	// spell:needWeapon(bool)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getNeedWeapon());
	} else {
		spell->setNeedWeapon(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellNeedLearn(lua_State* L)
{
	// spell:needLearn(bool)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getNeedLearn());
	} else {
		spell->setNeedLearn(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellSelfTarget(lua_State* L)
{
	// spell:isSelfTarget(bool)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getSelfTarget());
	} else {
		spell->setSelfTarget(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellBlocking(lua_State* L)
{
	// spell:isBlocking(blockingSolid, blockingCreature)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		// getter: returns both blocking flags
		pushBoolean(L, spell->getBlockingSolid());
		pushBoolean(L, spell->getBlockingCreature());
		return 2;
	}
	spell->setBlockingSolid(getBoolean(L, 2));
	spell->setBlockingCreature(getBoolean(L, 3));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaSpellAggressive(lua_State* L)
{
	// spell:isAggressive(bool)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getAggressive());
	} else {
		spell->setAggressive(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellPzLock(lua_State* L)
{
	// spell:isPzLock(bool)
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getPzLock());
	} else {
		spell->setPzLock(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaSpellVocation(lua_State* L)
{
	// spell:vocation(vocation)
	// Getter (no args): returns an array table of vocation names.
	// Setter: each argument is "vocName" or "vocName;showInDescription".
	Spell* spell = getUserdata<Spell>(L, 1);
	if (!spell) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_createtable(L, 0, 0);
		int i = 0;
		for (auto& voc : spell->getVocMap()) {
			// NOTE(review): getVocation() is dereferenced without a null
			// check — presumably voc map ids are always valid; confirm.
			std::string name = g_vocations.getVocation(voc.first)->getVocName();
			pushString(L, name);
			lua_rawseti(L, -2, ++i);
		}
	} else {
		int parameters = lua_gettop(L) - 1; // - 1 because self is a parameter aswell, which we want to skip ofc
		for (int i = 0; i < parameters; ++i) {
			// split "name;bool"; the optional second part toggles whether
			// the vocation is shown (defaults to false when absent)
			// NOTE(review): vocList[0] assumes explodeString never returns
			// an empty vector — confirm its behavior for empty input.
			std::vector<std::string> vocList = explodeString(getString(L, 2 + i), ";");
			spell->addVocMap(g_vocations.getVocationId(vocList[0]), vocList.size() > 1 ? booleanString(vocList[1]) : false);
		}
		pushBoolean(L, true);
	}
	return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellWords(lua_State* L)
{
	// spell:words(words[, separator = ""])
	InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
	// a non-instant spell (or no spell at all) yields nil
	if (!spell || spell->spellType != SPELL_INSTANT) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		// getter: returns words and separator
		pushString(L, spell->getWords());
		pushString(L, spell->getSeparator());
		return 2;
	}
	std::string sep = "";
	if (lua_gettop(L) == 3) {
		sep = getString(L, 3);
	}
	spell->setWords(getString(L, 2));
	spell->setSeparator(sep);
	pushBoolean(L, true);
	return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellNeedDirection(lua_State* L)
{
	// spell:needDirection(bool)
	InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
	// a non-instant spell (or no spell at all) yields nil
	if (!spell || spell->spellType != SPELL_INSTANT) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getNeedDirection());
	} else {
		spell->setNeedDirection(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellHasParams(lua_State* L)
{
	// spell:hasParams(bool)
	InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
	// a non-instant spell (or no spell at all) yields nil
	if (!spell || spell->spellType != SPELL_INSTANT) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getHasParam());
	} else {
		spell->setHasParam(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellHasPlayerNameParam(lua_State* L)
{
	// spell:hasPlayerNameParam(bool)
	InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
	// a non-instant spell (or no spell at all) yields nil
	if (!spell || spell->spellType != SPELL_INSTANT) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getHasPlayerNameParam());
	} else {
		spell->setHasPlayerNameParam(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellNeedCasterTargetOrDirection(lua_State* L)
{
	// spell:needCasterTargetOrDirection(bool)
	InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
	// a non-instant spell (or no spell at all) yields nil
	if (!spell || spell->spellType != SPELL_INSTANT) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getNeedCasterTargetOrDirection());
	} else {
		spell->setNeedCasterTargetOrDirection(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellIsBlockingWalls(lua_State* L)
{
	// spell:blockWalls(bool)
	InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
	// a non-instant spell (or no spell at all) yields nil
	if (!spell || spell->spellType != SPELL_INSTANT) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getBlockWalls());
	} else {
		spell->setBlockWalls(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellRuneLevel(lua_State* L)
{
	// spell:runeLevel(level)
	RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
	if (spell) {
		// if spell != SPELL_RUNE, it means that this actually is no RuneSpell, so we return nil
		if (spell->spellType != SPELL_RUNE) {
			lua_pushnil(L);
			return 1;
		}
		if (lua_gettop(L) == 1) {
			lua_pushnumber(L, spell->getLevel());
		} else {
			// FIX: read the argument only in the setter path; the original
			// fetched stack slot 2 unconditionally, even in the getter call
			// where no argument exists
			spell->setLevel(getNumber<int32_t>(L, 2));
			pushBoolean(L, true);
		}
	} else {
		lua_pushnil(L);
	}
	return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellRuneMagicLevel(lua_State* L)
{
	// spell:runeMagicLevel(magLevel)
	// Getter when called without an argument, setter otherwise.
	// Pushes nil when the userdata is missing or is not a RuneSpell.
	RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
	if (!spell || spell->spellType != SPELL_RUNE) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, spell->getMagicLevel());
	} else {
		// FIX: read the magic-level argument only on the setter path; it was
		// previously fetched unconditionally, even for getter calls with no
		// second argument.
		spell->setMagicLevel(getNumber<int32_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellRuneId(lua_State* L)
{
	// spell:runeId(id)
	// Getter when called without an argument, setter otherwise.
	// Pushes nil when the userdata is missing or is not a RuneSpell.
	RuneSpell* rune = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
	if (!rune || rune->spellType != SPELL_RUNE) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, rune->getRuneItemId());
	} else {
		rune->setRuneItemId(getNumber<uint16_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellCharges(lua_State* L)
{
	// spell:charges(charges)
	// Getter when called without an argument, setter otherwise.
	// Pushes nil when the userdata is missing or is not a RuneSpell.
	RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
	if (!spell || spell->spellType != SPELL_RUNE) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		lua_pushnumber(L, spell->getCharges());
	} else {
		spell->setCharges(getNumber<uint32_t>(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellAllowFarUse(lua_State* L)
{
	// spell:allowFarUse(bool)
	// Getter when called without an argument, setter otherwise.
	// Pushes nil when the userdata is missing or is not a RuneSpell.
	RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
	if (!spell || spell->spellType != SPELL_RUNE) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getAllowFarUse());
	} else {
		spell->setAllowFarUse(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellBlockWalls(lua_State* L)
{
	// spell:blockWalls(bool)
	// Getter when called without an argument, setter otherwise; backed by the
	// rune's line-of-sight check flag. Nil for non-rune spells.
	RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
	if (!spell || spell->spellType != SPELL_RUNE) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getCheckLineOfSight());
	} else {
		spell->setCheckLineOfSight(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellCheckFloor(lua_State* L)
{
	// spell:checkFloor(bool)
	// Getter when called without an argument, setter otherwise.
	// Pushes nil when the userdata is missing or is not a RuneSpell.
	RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
	if (!spell || spell->spellType != SPELL_RUNE) {
		lua_pushnil(L);
		return 1;
	}
	if (lua_gettop(L) == 1) {
		pushBoolean(L, spell->getCheckFloor());
	} else {
		spell->setCheckFloor(getBoolean(L, 2));
		pushBoolean(L, true);
	}
	return 1;
}
int LuaScriptInterface::luaCreateAction(lua_State* L)
{
	// Action()
	// Creates a new Action userdata; only allowed from the Scripts interface.
	if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
		reportErrorFunc(L, "Actions can only be registered in the Scripts interface.");
		lua_pushnil(L);
		return 1;
	}
	// FIX: removed dead null check — operator new throws std::bad_alloc on
	// failure, so the pointer is never null here.
	Action* action = new Action(getScriptEnv()->getScriptInterface());
	action->fromLua = true;
	pushUserdata<Action>(L, action);
	setMetatable(L, -1, "Action");
	return 1;
}
int LuaScriptInterface::luaActionOnUse(lua_State* L)
{
	// action:onUse(callback)
	// Stores the Lua callback on the action; false when the callback cannot
	// be loaded, nil when the userdata is missing.
	Action* action = getUserdata<Action>(L, 1);
	if (!action) {
		lua_pushnil(L);
		return 1;
	}
	if (!action->loadCallback()) {
		pushBoolean(L, false);
		return 1;
	}
	action->scripted = true;
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaActionRegister(lua_State* L)
{
	// action:register()
	// Hands the action over to g_actions, then clears the id ranges that were
	// only needed during registration.
	Action* action = getUserdata<Action>(L, 1);
	if (!action) {
		lua_pushnil(L);
		return 1;
	}
	if (!action->isScripted()) {
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, g_actions->registerLuaEvent(action));
	action->clearActionIdRange();
	action->clearItemIdRange();
	action->clearUniqueIdRange();
	return 1;
}
int LuaScriptInterface::luaActionItemId(lua_State* L)
{
	// action:id(ids)
	// Adds one or more item ids to the action.
	Action* action = getUserdata<Action>(L, 1);
	if (!action) {
		lua_pushnil(L);
		return 1;
	}
	int parameters = lua_gettop(L) - 1; // skip self (argument 1)
	if (parameters > 1) {
		for (int i = 0; i < parameters; ++i) {
			action->addItemId(getNumber<uint32_t>(L, 2 + i));
		}
	} else {
		action->addItemId(getNumber<uint32_t>(L, 2));
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaActionActionId(lua_State* L)
{
	// action:aid(aids)
	// Adds one or more action ids to the action.
	Action* action = getUserdata<Action>(L, 1);
	if (!action) {
		lua_pushnil(L);
		return 1;
	}
	int parameters = lua_gettop(L) - 1; // skip self (argument 1)
	if (parameters > 1) {
		for (int i = 0; i < parameters; ++i) {
			action->addActionId(getNumber<uint32_t>(L, 2 + i));
		}
	} else {
		action->addActionId(getNumber<uint32_t>(L, 2));
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaActionUniqueId(lua_State* L)
{
	// action:uid(uids)
	// Adds one or more unique ids to the action.
	Action* action = getUserdata<Action>(L, 1);
	if (!action) {
		lua_pushnil(L);
		return 1;
	}
	int parameters = lua_gettop(L) - 1; // skip self (argument 1)
	if (parameters > 1) {
		for (int i = 0; i < parameters; ++i) {
			action->addUniqueId(getNumber<uint32_t>(L, 2 + i));
		}
	} else {
		action->addUniqueId(getNumber<uint32_t>(L, 2));
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaActionAllowFarUse(lua_State* L)
{
	// action:allowFarUse(bool)
	// Enables or disables far use for the action.
	Action* action = getUserdata<Action>(L, 1);
	if (!action) {
		lua_pushnil(L);
		return 1;
	}
	action->setAllowFarUse(getBoolean(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaActionBlockWalls(lua_State* L)
{
	// action:blockWalls(bool)
	// Toggles the line-of-sight check for the action.
	Action* action = getUserdata<Action>(L, 1);
	if (!action) {
		lua_pushnil(L);
		return 1;
	}
	action->setCheckLineOfSight(getBoolean(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaActionCheckFloor(lua_State* L)
{
	// action:checkFloor(bool)
	// Toggles the floor check for the action.
	Action* action = getUserdata<Action>(L, 1);
	if (!action) {
		lua_pushnil(L);
		return 1;
	}
	action->setCheckFloor(getBoolean(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreateTalkaction(lua_State* L)
{
	// TalkAction(words)
	// Creates a new TalkAction userdata from one or more trigger words; only
	// allowed from the Scripts interface.
	if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
		reportErrorFunc(L, "TalkActions can only be registered in the Scripts interface.");
		lua_pushnil(L);
		return 1;
	}
	// FIX: removed dead null check — operator new throws std::bad_alloc on
	// failure, so the pointer is never null here.
	TalkAction* talk = new TalkAction(getScriptEnv()->getScriptInterface());
	for (int i = 2; i <= lua_gettop(L); i++) {
		talk->setWords(getString(L, i));
	}
	talk->fromLua = true;
	pushUserdata<TalkAction>(L, talk);
	setMetatable(L, -1, "TalkAction");
	return 1;
}
int LuaScriptInterface::luaTalkactionOnSay(lua_State* L)
{
	// talkAction:onSay(callback)
	// Loads the Lua callback for this talkaction; false on load failure,
	// nil when the userdata is missing.
	TalkAction* talk = getUserdata<TalkAction>(L, 1);
	if (!talk) {
		lua_pushnil(L);
		return 1;
	}
	if (!talk->loadCallback()) {
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaTalkactionRegister(lua_State* L)
{
	// talkAction:register()
	// Registers the scripted talkaction with g_talkActions.
	TalkAction* talk = getUserdata<TalkAction>(L, 1);
	if (!talk) {
		lua_pushnil(L);
		return 1;
	}
	if (!talk->isScripted()) {
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, g_talkActions->registerLuaEvent(talk));
	return 1;
}
int LuaScriptInterface::luaTalkactionSeparator(lua_State* L)
{
	// talkAction:separator(sep)
	// Sets the parameter separator for this talkaction.
	TalkAction* talk = getUserdata<TalkAction>(L, 1);
	if (!talk) {
		lua_pushnil(L);
		return 1;
	}
	talk->setSeparator(getString(L, 2).c_str());
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaTalkactionAccess(lua_State* L)
{
	// talkAction:access(needAccess = false)
	// Marks whether the talkaction requires access rights.
	TalkAction* talk = getUserdata<TalkAction>(L, 1);
	if (!talk) {
		lua_pushnil(L);
		return 1;
	}
	talk->setNeedAccess(getBoolean(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaTalkactionAccountType(lua_State* L)
{
	// talkAction:accountType(AccountType_t = ACCOUNT_TYPE_NORMAL)
	// Sets the minimum account type required to use the talkaction.
	TalkAction* talk = getUserdata<TalkAction>(L, 1);
	if (!talk) {
		lua_pushnil(L);
		return 1;
	}
	talk->setRequiredAccountType(getNumber<AccountType_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreateCreatureEvent(lua_State* L)
{
	// CreatureEvent(eventName)
	// Creates a new CreatureEvent userdata; only allowed from the Scripts
	// interface.
	if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
		reportErrorFunc(L, "CreatureEvents can only be registered in the Scripts interface.");
		lua_pushnil(L);
		return 1;
	}
	// FIX: removed dead null check — operator new throws std::bad_alloc on
	// failure, so the pointer is never null here.
	CreatureEvent* creature = new CreatureEvent(getScriptEnv()->getScriptInterface());
	creature->setName(getString(L, 2));
	creature->fromLua = true;
	pushUserdata<CreatureEvent>(L, creature);
	setMetatable(L, -1, "CreatureEvent");
	return 1;
}
int LuaScriptInterface::luaCreatureEventType(lua_State* L)
{
	// creatureevent:type(callback)
	// Maps a type-name string onto the creature event's internal type and
	// marks the event loaded. False (and an error log) for unknown names.
	CreatureEvent* creature = getUserdata<CreatureEvent>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	std::string typeName = getString(L, 2);
	std::string tmpStr = asLowerCaseString(typeName);
	if (tmpStr == "login") {
		creature->setEventType(CREATURE_EVENT_LOGIN);
	} else if (tmpStr == "logout") {
		creature->setEventType(CREATURE_EVENT_LOGOUT);
	} else if (tmpStr == "think") {
		creature->setEventType(CREATURE_EVENT_THINK);
	} else if (tmpStr == "preparedeath") {
		creature->setEventType(CREATURE_EVENT_PREPAREDEATH);
	} else if (tmpStr == "death") {
		creature->setEventType(CREATURE_EVENT_DEATH);
	} else if (tmpStr == "kill") {
		creature->setEventType(CREATURE_EVENT_KILL);
	} else if (tmpStr == "advance") {
		creature->setEventType(CREATURE_EVENT_ADVANCE);
	} else if (tmpStr == "modalwindow") {
		creature->setEventType(CREATURE_EVENT_MODALWINDOW);
	} else if (tmpStr == "textedit") {
		creature->setEventType(CREATURE_EVENT_TEXTEDIT);
	} else if (tmpStr == "healthchange") {
		creature->setEventType(CREATURE_EVENT_HEALTHCHANGE);
	} else if (tmpStr == "manachange") {
		creature->setEventType(CREATURE_EVENT_MANACHANGE);
	} else if (tmpStr == "extendedopcode") {
		creature->setEventType(CREATURE_EVENT_EXTENDED_OPCODE);
	} else {
		std::cout << "[Error - CreatureEvent::configureLuaEvent] Invalid type for creature event: " << typeName << std::endl;
		pushBoolean(L, false);
		// FIX: return immediately on an invalid type. Previously execution
		// fell through, pushing a second return value (true) while the
		// function returns 1, and marking the event loaded anyway.
		return 1;
	}
	creature->setLoaded(true);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreatureEventRegister(lua_State* L)
{
	// creatureevent:register()
	// Registers the scripted creature event with g_creatureEvents.
	CreatureEvent* creature = getUserdata<CreatureEvent>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	if (!creature->isScripted()) {
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, g_creatureEvents->registerLuaEvent(creature));
	return 1;
}
int LuaScriptInterface::luaCreatureEventOnCallback(lua_State* L)
{
	// creatureevent:onLogin / logout / etc. (callback)
	// Loads the Lua callback for this creature event; false on load failure.
	CreatureEvent* creature = getUserdata<CreatureEvent>(L, 1);
	if (!creature) {
		lua_pushnil(L);
		return 1;
	}
	if (!creature->loadCallback()) {
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreateMoveEvent(lua_State* L)
{
	// MoveEvent()
	// Creates a new MoveEvent userdata; only allowed from the Scripts
	// interface.
	if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
		reportErrorFunc(L, "MoveEvents can only be registered in the Scripts interface.");
		lua_pushnil(L);
		return 1;
	}
	// FIX: removed dead null check — operator new throws std::bad_alloc on
	// failure, so the pointer is never null here.
	MoveEvent* moveevent = new MoveEvent(getScriptEnv()->getScriptInterface());
	moveevent->fromLua = true;
	pushUserdata<MoveEvent>(L, moveevent);
	setMetatable(L, -1, "MoveEvent");
	return 1;
}
int LuaScriptInterface::luaMoveEventType(lua_State* L)
{
	// moveevent:type(callback)
	// Maps a type-name string onto the move event's internal type and wires
	// up the matching default C++ handler.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	std::string typeName = getString(L, 2);
	std::string tmpStr = asLowerCaseString(typeName);
	if (tmpStr == "stepin") {
		moveevent->setEventType(MOVE_EVENT_STEP_IN);
		moveevent->stepFunction = moveevent->StepInField;
	} else if (tmpStr == "stepout") {
		moveevent->setEventType(MOVE_EVENT_STEP_OUT);
		moveevent->stepFunction = moveevent->StepOutField;
	} else if (tmpStr == "equip") {
		moveevent->setEventType(MOVE_EVENT_EQUIP);
		moveevent->equipFunction = moveevent->EquipItem;
	} else if (tmpStr == "deequip") {
		moveevent->setEventType(MOVE_EVENT_DEEQUIP);
		moveevent->equipFunction = moveevent->DeEquipItem;
	} else if (tmpStr == "additem") {
		moveevent->setEventType(MOVE_EVENT_ADD_ITEM);
		moveevent->moveFunction = moveevent->AddItemField;
	} else if (tmpStr == "removeitem") {
		moveevent->setEventType(MOVE_EVENT_REMOVE_ITEM);
		moveevent->moveFunction = moveevent->RemoveItemField;
	} else {
		std::cout << "Error: [MoveEvent::configureMoveEvent] No valid event name " << typeName << std::endl;
		pushBoolean(L, false);
		// FIX: return immediately on an invalid name. Previously execution
		// fell through and pushed a second return value (true) while the
		// function returns 1.
		return 1;
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventRegister(lua_State* L)
{
	// moveevent:register()
	// Registers the move event with g_moveEvents (Lua function or scripted
	// event), then clears registration-only ranges.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	// For equip/deequip events without an explicit slot, derive the slot from
	// the first registered item id.
	// FIX: guard against an empty item id range — .at(0) would otherwise
	// throw std::out_of_range and crash the server.
	if ((moveevent->getEventType() == MOVE_EVENT_EQUIP || moveevent->getEventType() == MOVE_EVENT_DEEQUIP) && moveevent->getSlot() == SLOTP_WHEREEVER && !moveevent->getItemIdRange().empty()) {
		uint32_t id = moveevent->getItemIdRange().at(0);
		ItemType& it = Item::items.getItemType(id);
		moveevent->setSlot(it.slotPosition);
	}
	if (!moveevent->isScripted()) {
		pushBoolean(L, g_moveEvents->registerLuaFunction(moveevent));
		return 1;
	}
	pushBoolean(L, g_moveEvents->registerLuaEvent(moveevent));
	moveevent->clearItemIdRange();
	moveevent->clearActionIdRange();
	moveevent->clearUniqueIdRange();
	moveevent->clearPosList();
	return 1;
}
int LuaScriptInterface::luaMoveEventOnCallback(lua_State* L)
{
	// moveevent:onEquip / deEquip / etc. (callback)
	// Loads the Lua callback for this move event; false on load failure.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	if (!moveevent->loadCallback()) {
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventSlot(lua_State* L)
{
	// moveevent:slot(slot)
	// Translates a slot-name string into slot flags. Only meaningful for
	// equip/deequip events; other event types ignore the argument and
	// still report success.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	if (moveevent->getEventType() == MOVE_EVENT_EQUIP || moveevent->getEventType() == MOVE_EVENT_DEEQUIP) {
		std::string slot = asLowerCaseString(getString(L, 2));
		if (slot == "head") {
			moveevent->setSlot(SLOTP_HEAD);
		} else if (slot == "necklace") {
			moveevent->setSlot(SLOTP_NECKLACE);
		} else if (slot == "backpack") {
			moveevent->setSlot(SLOTP_BACKPACK);
		} else if (slot == "armor" || slot == "body") {
			moveevent->setSlot(SLOTP_ARMOR);
		} else if (slot == "right-hand") {
			moveevent->setSlot(SLOTP_RIGHT);
		} else if (slot == "left-hand") {
			moveevent->setSlot(SLOTP_LEFT);
		} else if (slot == "hand" || slot == "shield") {
			// "hand"/"shield" match either hand
			moveevent->setSlot(SLOTP_RIGHT | SLOTP_LEFT);
		} else if (slot == "legs") {
			moveevent->setSlot(SLOTP_LEGS);
		} else if (slot == "feet") {
			moveevent->setSlot(SLOTP_FEET);
		} else if (slot == "ring") {
			moveevent->setSlot(SLOTP_RING);
		} else if (slot == "ammo") {
			moveevent->setSlot(SLOTP_AMMO);
		} else {
			std::cout << "[Warning - MoveEvent::configureMoveEvent] Unknown slot type: " << slot << std::endl;
			pushBoolean(L, false);
			return 1;
		}
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventLevel(lua_State* L)
{
	// moveevent:level(lvl)
	// Sets the required level and flags it for the wield info description.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	moveevent->setRequiredLevel(getNumber<uint32_t>(L, 2));
	moveevent->setWieldInfo(WIELDINFO_LEVEL);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventMagLevel(lua_State* L)
{
	// moveevent:magicLevel(lvl)
	// Sets the required magic level and flags it for the wield info.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	moveevent->setRequiredMagLevel(getNumber<uint32_t>(L, 2));
	moveevent->setWieldInfo(WIELDINFO_MAGLV);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventPremium(lua_State* L)
{
	// moveevent:premium(bool)
	// Sets the premium requirement and flags it for the wield info.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	moveevent->setNeedPremium(getBoolean(L, 2));
	moveevent->setWieldInfo(WIELDINFO_PREMIUM);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventVocation(lua_State* L)
{
	// moveevent:vocation(vocName[, showInDescription = false, lastVoc = false])
	// Adds a vocation requirement; optionally appends it to the description
	// string ("and" joins the final vocation, commas join the rest).
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	moveevent->addVocEquipMap(getString(L, 2));
	moveevent->setWieldInfo(WIELDINFO_VOCREQ);
	// FIX: use the defaulted getBoolean overload instead of the redundant
	// "if (getBoolean(...)) x = getBoolean(...)" double read — consistent
	// with luaWeaponVocation.
	bool showInDescription = getBoolean(L, 3, false);
	bool lastVoc = getBoolean(L, 4, false);
	if (showInDescription) {
		std::string tmp;
		if (moveevent->getVocationString().empty()) {
			tmp = asLowerCaseString(getString(L, 2));
			tmp += "s";
		} else {
			tmp = moveevent->getVocationString();
			if (lastVoc) {
				tmp += " and ";
			} else {
				tmp += ", ";
			}
			tmp += asLowerCaseString(getString(L, 2));
			tmp += "s";
		}
		moveevent->setVocationString(tmp);
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventTileItem(lua_State* L)
{
	// moveevent:tileItem(bool)
	// Marks whether the move event applies to tile items.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	moveevent->setTileItem(getBoolean(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventItemId(lua_State* L)
{
	// moveevent:id(ids)
	// Adds one or more item ids to the move event.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	int parameters = lua_gettop(L) - 1; // skip self (argument 1)
	if (parameters > 1) {
		for (int i = 0; i < parameters; ++i) {
			moveevent->addItemId(getNumber<uint32_t>(L, 2 + i));
		}
	} else {
		moveevent->addItemId(getNumber<uint32_t>(L, 2));
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventActionId(lua_State* L)
{
	// moveevent:aid(ids)
	// Adds one or more action ids to the move event.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	int parameters = lua_gettop(L) - 1; // skip self (argument 1)
	if (parameters > 1) {
		for (int i = 0; i < parameters; ++i) {
			moveevent->addActionId(getNumber<uint32_t>(L, 2 + i));
		}
	} else {
		moveevent->addActionId(getNumber<uint32_t>(L, 2));
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventUniqueId(lua_State* L)
{
	// moveevent:uid(ids)
	// Adds one or more unique ids to the move event.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	int parameters = lua_gettop(L) - 1; // skip self (argument 1)
	if (parameters > 1) {
		for (int i = 0; i < parameters; ++i) {
			moveevent->addUniqueId(getNumber<uint32_t>(L, 2 + i));
		}
	} else {
		moveevent->addUniqueId(getNumber<uint32_t>(L, 2));
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaMoveEventPosition(lua_State* L)
{
	// moveevent:position(positions)
	// Adds one or more positions to the move event.
	MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
	if (!moveevent) {
		lua_pushnil(L);
		return 1;
	}
	int parameters = lua_gettop(L) - 1; // skip self (argument 1)
	if (parameters > 1) {
		for (int i = 0; i < parameters; ++i) {
			moveevent->addPosList(getPosition(L, 2 + i));
		}
	} else {
		moveevent->addPosList(getPosition(L, 2));
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaCreateGlobalEvent(lua_State* L)
{
	// GlobalEvent(eventName)
	// Creates a new GlobalEvent userdata; only allowed from the Scripts
	// interface.
	if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
		reportErrorFunc(L, "GlobalEvents can only be registered in the Scripts interface.");
		lua_pushnil(L);
		return 1;
	}
	// FIX: removed dead null check — operator new throws std::bad_alloc on
	// failure, so the pointer is never null here.
	GlobalEvent* global = new GlobalEvent(getScriptEnv()->getScriptInterface());
	global->setName(getString(L, 2));
	global->setEventType(GLOBALEVENT_NONE);
	global->fromLua = true;
	pushUserdata<GlobalEvent>(L, global);
	setMetatable(L, -1, "GlobalEvent");
	return 1;
}
int LuaScriptInterface::luaGlobalEventType(lua_State* L)
{
	// globalevent:type(callback)
	// Maps a type-name string onto the global event's internal type.
	GlobalEvent* global = getUserdata<GlobalEvent>(L, 1);
	if (!global) {
		lua_pushnil(L);
		return 1;
	}
	std::string typeName = getString(L, 2);
	std::string tmpStr = asLowerCaseString(typeName);
	if (tmpStr == "startup") {
		global->setEventType(GLOBALEVENT_STARTUP);
	} else if (tmpStr == "shutdown") {
		global->setEventType(GLOBALEVENT_SHUTDOWN);
	} else if (tmpStr == "record") {
		global->setEventType(GLOBALEVENT_RECORD);
	} else {
		// FIX: log prefix wrongly said "CreatureEvent::configureLuaEvent".
		std::cout << "[Error - GlobalEvent::configureLuaEvent] Invalid type for global event: " << typeName << std::endl;
		pushBoolean(L, false);
		// FIX: return immediately on an invalid type. Previously execution
		// fell through and pushed a second return value (true) while the
		// function returns 1.
		return 1;
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaGlobalEventRegister(lua_State* L)
{
	// globalevent:register()
	// Registers the scripted global event with g_globalEvents.
	GlobalEvent* globalevent = getUserdata<GlobalEvent>(L, 1);
	if (!globalevent) {
		lua_pushnil(L);
		return 1;
	}
	if (!globalevent->isScripted()) {
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, g_globalEvents->registerLuaEvent(globalevent));
	return 1;
}
int LuaScriptInterface::luaGlobalEventOnCallback(lua_State* L)
{
	// globalevent:onThink / record / etc. (callback)
	// Loads the Lua callback for this global event; false on load failure.
	GlobalEvent* globalevent = getUserdata<GlobalEvent>(L, 1);
	if (!globalevent) {
		lua_pushnil(L);
		return 1;
	}
	if (!globalevent->loadCallback()) {
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaGlobalEventTime(lua_State* L)
{
	// globalevent:time(time)
	// Parses an "hh[:mm[:ss]]" string, validates each field, and schedules
	// the event as a GLOBALEVENT_TIMER firing at the next such wall-clock
	// time (today or, if already past, tomorrow).
	GlobalEvent* globalevent = getUserdata<GlobalEvent>(L, 1);
	if (!globalevent) {
		lua_pushnil(L);
		return 1;
	}
	std::string timer = getString(L, 2);
	std::vector<int32_t> params = vectorAtoi(explodeString(timer, ":"));
	int32_t hour = params.front();
	if (hour < 0 || hour > 23) {
		std::cout << "[Error - GlobalEvent::configureEvent] Invalid hour \"" << timer << "\" for globalevent with name: " << globalevent->getName() << std::endl;
		pushBoolean(L, false);
		return 1;
	}
	// hour is packed into the high 16 bits of the interval field
	globalevent->setInterval(hour << 16);
	int32_t min = 0;
	int32_t sec = 0;
	if (params.size() > 1) {
		min = params[1];
		if (min < 0 || min > 59) {
			std::cout << "[Error - GlobalEvent::configureEvent] Invalid minute \"" << timer << "\" for globalevent with name: " << globalevent->getName() << std::endl;
			pushBoolean(L, false);
			return 1;
		}
		if (params.size() > 2) {
			sec = params[2];
			if (sec < 0 || sec > 59) {
				std::cout << "[Error - GlobalEvent::configureEvent] Invalid second \"" << timer << "\" for globalevent with name: " << globalevent->getName() << std::endl;
				pushBoolean(L, false);
				return 1;
			}
		}
	}
	time_t current_time = time(nullptr);
	// FIX: restored "&current_time" — the source contained the mojibake
	// "¤t_time" (the "&curr" run was eaten by a bad encoding), which
	// does not compile.
	tm* timeinfo = localtime(&current_time);
	timeinfo->tm_hour = hour;
	timeinfo->tm_min = min;
	timeinfo->tm_sec = sec;
	time_t difference = static_cast<time_t>(difftime(mktime(timeinfo), current_time));
	if (difference < 0) {
		// target time already passed today; fire tomorrow
		difference += 86400;
	}
	globalevent->setNextExecution(current_time + difference);
	globalevent->setEventType(GLOBALEVENT_TIMER);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaGlobalEventInterval(lua_State* L)
{
	// globalevent:interval(interval)
	// Sets the repeat interval (ms) and the first execution time.
	GlobalEvent* globalevent = getUserdata<GlobalEvent>(L, 1);
	if (!globalevent) {
		lua_pushnil(L);
		return 1;
	}
	// FIX: read the argument once instead of calling getNumber twice.
	uint32_t interval = getNumber<uint32_t>(L, 2);
	globalevent->setInterval(interval);
	globalevent->setNextExecution(OTSYS_TIME() + interval);
	pushBoolean(L, true);
	return 1;
}
// Weapon
int LuaScriptInterface::luaCreateWeapon(lua_State* L)
{
	// Weapon(type)
	// Creates a weapon userdata of the concrete class matching the weapon
	// type; only allowed from the Scripts interface. Nil for unknown types.
	if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
		reportErrorFunc(L, "Weapons can only be registered in the Scripts interface.");
		lua_pushnil(L);
		return 1;
	}
	WeaponType_t type = getNumber<WeaponType_t>(L, 2);
	// FIX: removed dead null checks after each new — operator new throws
	// std::bad_alloc on failure, so the pointers are never null here.
	switch (type) {
		case WEAPON_SWORD:
		case WEAPON_AXE:
		case WEAPON_CLUB: {
			WeaponMelee* weapon = new WeaponMelee(getScriptEnv()->getScriptInterface());
			pushUserdata<WeaponMelee>(L, weapon);
			setMetatable(L, -1, "Weapon");
			weapon->weaponType = type;
			weapon->fromLua = true;
			break;
		}
		case WEAPON_DISTANCE:
		case WEAPON_AMMO: {
			WeaponDistance* weapon = new WeaponDistance(getScriptEnv()->getScriptInterface());
			pushUserdata<WeaponDistance>(L, weapon);
			setMetatable(L, -1, "Weapon");
			weapon->weaponType = type;
			weapon->fromLua = true;
			break;
		}
		case WEAPON_WAND: {
			WeaponWand* weapon = new WeaponWand(getScriptEnv()->getScriptInterface());
			pushUserdata<WeaponWand>(L, weapon);
			setMetatable(L, -1, "Weapon");
			weapon->weaponType = type;
			weapon->fromLua = true;
			break;
		}
		default: {
			lua_pushnil(L);
			break;
		}
	}
	return 1;
}
int LuaScriptInterface::luaWeaponAction(lua_State* L)
{
	// weapon:action(callback)
	// Maps an action-name string onto the weapon's on-use action.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	std::string typeName = getString(L, 2);
	std::string tmpStr = asLowerCaseString(typeName);
	if (tmpStr == "removecount") {
		weapon->action = WEAPONACTION_REMOVECOUNT;
	} else if (tmpStr == "removecharge") {
		weapon->action = WEAPONACTION_REMOVECHARGE;
	} else if (tmpStr == "move") {
		weapon->action = WEAPONACTION_MOVE;
	} else {
		std::cout << "Error: [Weapon::action] No valid action " << typeName << std::endl;
		pushBoolean(L, false);
		// FIX: return immediately on an invalid action name. Previously
		// execution fell through and pushed a second return value (true)
		// while the function returns 1.
		return 1;
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponRegister(lua_State* L)
{
	// weapon:register()
	// Registers the weapon with g_weapons, mirrors its wield requirements
	// onto the global ItemType for its item id, and releases the luascript
	// reference so the userdata can no longer be used after registration.
	Weapon** weaponPtr = getRawUserdata<Weapon>(L, 1);
	if (!weaponPtr) {
		lua_pushnil(L);
		return 1;
	}
	if (auto* weapon = *weaponPtr) {
		// Re-fetch through the concrete subclass so virtual configuration
		// below operates on the right type.
		if (weapon->weaponType == WEAPON_DISTANCE || weapon->weaponType == WEAPON_AMMO) {
			weapon = getUserdata<WeaponDistance>(L, 1);
		} else if (weapon->weaponType == WEAPON_WAND) {
			weapon = getUserdata<WeaponWand>(L, 1);
		} else {
			weapon = getUserdata<WeaponMelee>(L, 1);
		}
		uint16_t id = weapon->getID();
		ItemType& it = Item::items.getItemType(id);
		it.weaponType = weapon->weaponType;
		// Copy wield requirements to the item type only if any were set.
		if (weapon->getWieldInfo() != 0) {
			it.wieldInfo = weapon->getWieldInfo();
			it.vocationString = weapon->getVocationString();
			it.minReqLevel = weapon->getReqLevel();
			it.minReqMagicLevel = weapon->getReqMagLv();
		}
		weapon->configureWeapon(it);
		pushBoolean(L, g_weapons->registerLuaEvent(weapon));
		*weaponPtr = nullptr; // Remove luascript reference
	} else {
		lua_pushnil(L);
	}
	return 1;
}
int LuaScriptInterface::luaWeaponOnUseWeapon(lua_State* L)
{
	// weapon:onUseWeapon(callback)
	// Loads the Lua callback for this weapon; false on load failure.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	if (!weapon->loadCallback()) {
		pushBoolean(L, false);
		return 1;
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponUnproperly(lua_State* L)
{
	// weapon:wieldedUnproperly(bool)
	// Marks whether the weapon may be wielded without meeting requirements.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setWieldUnproperly(getBoolean(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponLevel(lua_State* L)
{
	// weapon:level(lvl)
	// Sets the required level and flags it for the wield info description.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setRequiredLevel(getNumber<uint32_t>(L, 2));
	weapon->setWieldInfo(WIELDINFO_LEVEL);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponMagicLevel(lua_State* L)
{
	// weapon:magicLevel(lvl)
	// Sets the required magic level and flags it for the wield info.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setRequiredMagLevel(getNumber<uint32_t>(L, 2));
	weapon->setWieldInfo(WIELDINFO_MAGLV);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponMana(lua_State* L)
{
	// weapon:mana(mana)
	// Sets the flat mana cost for using the weapon.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setMana(getNumber<uint32_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponManaPercent(lua_State* L)
{
	// weapon:manaPercent(percent)
	// Sets the percentage-based mana cost for using the weapon.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setManaPercent(getNumber<uint32_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponHealth(lua_State* L)
{
	// weapon:health(health)
	// Sets the flat health cost for using the weapon.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setHealth(getNumber<int32_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponHealthPercent(lua_State* L)
{
	// weapon:healthPercent(percent)
	// Sets the percentage-based health cost for using the weapon.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setHealthPercent(getNumber<uint32_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponSoul(lua_State* L)
{
	// weapon:soul(soul)
	// Sets the soul-point cost for using the weapon.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setSoul(getNumber<uint32_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponBreakChance(lua_State* L)
{
	// weapon:breakChance(percent)
	// Sets the chance for the weapon to break on use.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setBreakChance(getNumber<uint32_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponWandDamage(lua_State* L)
{
	// weapon:damage(damage[min, max]) only use this if the weapon is a wand!
	// One argument sets min == max; two arguments set the range.
	WeaponWand* weapon = getUserdata<WeaponWand>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setMinChange(getNumber<uint32_t>(L, 2));
	if (lua_gettop(L) > 2) {
		weapon->setMaxChange(getNumber<uint32_t>(L, 3));
	} else {
		weapon->setMaxChange(getNumber<uint32_t>(L, 2));
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponElement(lua_State* L)
{
	// weapon:element(combatType)
	// Accepts either a CombatType_t number or an element-name string.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	// A non-numeric (or zero) second argument is interpreted as a name.
	if (!getNumber<CombatType_t>(L, 2)) {
		std::string element = getString(L, 2);
		std::string elementName = asLowerCaseString(element);
		if (elementName == "earth") {
			weapon->params.combatType = COMBAT_EARTHDAMAGE;
		} else if (elementName == "ice") {
			weapon->params.combatType = COMBAT_ICEDAMAGE;
		} else if (elementName == "energy") {
			weapon->params.combatType = COMBAT_ENERGYDAMAGE;
		} else if (elementName == "fire") {
			weapon->params.combatType = COMBAT_FIREDAMAGE;
		} else if (elementName == "death") {
			weapon->params.combatType = COMBAT_DEATHDAMAGE;
		} else if (elementName == "holy") {
			weapon->params.combatType = COMBAT_HOLYDAMAGE;
		} else {
			std::cout << "[Warning - weapon:element] Type \"" << element << "\" does not exist." << std::endl;
		}
	} else {
		weapon->params.combatType = getNumber<CombatType_t>(L, 2);
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponPremium(lua_State* L)
{
	// weapon:premium(bool)
	// Sets the premium requirement and flags it for the wield info.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setNeedPremium(getBoolean(L, 2));
	weapon->setWieldInfo(WIELDINFO_PREMIUM);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponVocation(lua_State* L)
{
	// weapon:vocation(vocName[, showInDescription = false, lastVoc = false])
	// Adds a vocation requirement; optionally appends it to the description
	// string ("and" joins the final vocation, commas join the rest).
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->addVocWeaponMap(getString(L, 2));
	weapon->setWieldInfo(WIELDINFO_VOCREQ);
	bool showInDescription = getBoolean(L, 3, false);
	bool lastVoc = getBoolean(L, 4, false);
	if (showInDescription) {
		std::string tmp;
		if (weapon->getVocationString().empty()) {
			tmp = asLowerCaseString(getString(L, 2));
			tmp += "s";
		} else {
			tmp = weapon->getVocationString();
			if (lastVoc) {
				tmp += " and ";
			} else {
				tmp += ", ";
			}
			tmp += asLowerCaseString(getString(L, 2));
			tmp += "s";
		}
		weapon->setVocationString(tmp);
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponId(lua_State* L)
{
	// weapon:id(id)
	// Sets the item id that this weapon definition is bound to.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	weapon->setID(getNumber<uint32_t>(L, 2));
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponAttack(lua_State* L)
{
	// weapon:attack(atk)
	// Writes the attack value straight onto the weapon's ItemType.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	ItemType& it = Item::items.getItemType(weapon->getID());
	it.attack = getNumber<int32_t>(L, 2);
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponDefense(lua_State* L)
{
	// weapon:defense(defense[, extraDefense])
	// Writes defense (and optional extra defense) onto the weapon's ItemType.
	Weapon* weapon = getUserdata<Weapon>(L, 1);
	if (!weapon) {
		lua_pushnil(L);
		return 1;
	}
	ItemType& it = Item::items.getItemType(weapon->getID());
	it.defense = getNumber<int32_t>(L, 2);
	if (lua_gettop(L) > 2) {
		it.extraDefense = getNumber<int32_t>(L, 3);
	}
	pushBoolean(L, true);
	return 1;
}
int LuaScriptInterface::luaWeaponRange(lua_State* L)
{
// weapon:range(range)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.shootRange = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponCharges(lua_State* L)
{
// weapon:charges(charges[, showCharges = true])
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
bool showCharges = getBoolean(L, 3, true);
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.charges = getNumber<uint8_t>(L, 2);
it.showCharges = showCharges;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponDuration(lua_State* L)
{
// weapon:duration(duration[, showDuration = true])
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
bool showDuration = getBoolean(L, 3, true);
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.decayTime = getNumber<uint32_t>(L, 2);
it.showDuration = showDuration;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponDecayTo(lua_State* L)
{
// weapon:decayTo([itemid = 0])
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t itemid = getNumber<uint16_t>(L, 2, 0);
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.decayTo = itemid;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponTransformEquipTo(lua_State* L)
{
// weapon:transformEquipTo(itemid)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.transformEquipTo = getNumber<uint16_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponTransformDeEquipTo(lua_State* L)
{
// weapon:transformDeEquipTo(itemid)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.transformDeEquipTo = getNumber<uint16_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponShootType(lua_State* L)
{
// weapon:shootType(type)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.shootType = getNumber<ShootType_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponSlotType(lua_State* L)
{
// weapon:slotType(slot)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
std::string slot = getString(L, 2);
if (slot == "two-handed") {
it.slotPosition |= SLOTP_TWO_HAND;
} else {
it.slotPosition |= SLOTP_HAND;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponAmmoType(lua_State* L)
{
// weapon:ammoType(type)
WeaponDistance* weapon = getUserdata<WeaponDistance>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
std::string type = getString(L, 2);
if (type == "arrow") {
it.ammoType = AMMO_ARROW;
} else if (type == "bolt"){
it.ammoType = AMMO_BOLT;
} else {
std::cout << "[Warning - weapon:ammoType] Type \"" << type << "\" does not exist." << std::endl;
lua_pushnil(L);
return 1;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponHitChance(lua_State* L)
{
// weapon:hitChance(chance)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.hitChance = getNumber<int8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponMaxHitChance(lua_State* L)
{
// weapon:maxHitChance(max)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.maxHitChance = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponExtraElement(lua_State* L)
{
// weapon:extraElement(atk, combatType)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.abilities.get()->elementDamage = getNumber<uint16_t>(L, 2);
if (!getNumber<CombatType_t>(L, 3)) {
std::string element = getString(L, 3);
std::string tmpStrValue = asLowerCaseString(element);
if (tmpStrValue == "earth") {
it.abilities.get()->elementType = COMBAT_EARTHDAMAGE;
} else if (tmpStrValue == "ice") {
it.abilities.get()->elementType = COMBAT_ICEDAMAGE;
} else if (tmpStrValue == "energy") {
it.abilities.get()->elementType = COMBAT_ENERGYDAMAGE;
} else if (tmpStrValue == "fire") {
it.abilities.get()->elementType = COMBAT_FIREDAMAGE;
} else if (tmpStrValue == "death") {
it.abilities.get()->elementType = COMBAT_DEATHDAMAGE;
} else if (tmpStrValue == "holy") {
it.abilities.get()->elementType = COMBAT_HOLYDAMAGE;
} else {
std::cout << "[Warning - weapon:extraElement] Type \"" << element << "\" does not exist." << std::endl;
}
} else {
it.abilities.get()->elementType = getNumber<CombatType_t>(L, 3);
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
//
LuaEnvironment::LuaEnvironment() : LuaScriptInterface("Main Interface") {}
LuaEnvironment::~LuaEnvironment()
{
delete testInterface;
closeState();
}
bool LuaEnvironment::initState()
{
luaState = luaL_newstate();
if (!luaState) {
return false;
}
luaL_openlibs(luaState);
registerFunctions();
runningEventId = EVENT_ID_USER;
return true;
}
bool LuaEnvironment::reInitState()
{
// TODO: get children, reload children
closeState();
return initState();
}
bool LuaEnvironment::closeState()
{
if (!luaState) {
return false;
}
for (const auto& combatEntry : combatIdMap) {
clearCombatObjects(combatEntry.first);
}
for (const auto& areaEntry : areaIdMap) {
clearAreaObjects(areaEntry.first);
}
for (auto& timerEntry : timerEvents) {
LuaTimerEventDesc timerEventDesc = std::move(timerEntry.second);
for (int32_t parameter : timerEventDesc.parameters) {
luaL_unref(luaState, LUA_REGISTRYINDEX, parameter);
}
luaL_unref(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
}
combatIdMap.clear();
areaIdMap.clear();
timerEvents.clear();
cacheFiles.clear();
lua_close(luaState);
luaState = nullptr;
return true;
}
LuaScriptInterface* LuaEnvironment::getTestInterface()
{
if (!testInterface) {
testInterface = new LuaScriptInterface("Test Interface");
testInterface->initState();
}
return testInterface;
}
Combat_ptr LuaEnvironment::getCombatObject(uint32_t id) const
{
auto it = combatMap.find(id);
if (it == combatMap.end()) {
return nullptr;
}
return it->second;
}
Combat_ptr LuaEnvironment::createCombatObject(LuaScriptInterface* interface)
{
Combat_ptr combat = std::make_shared<Combat>();
combatMap[++lastCombatId] = combat;
combatIdMap[interface].push_back(lastCombatId);
return combat;
}
void LuaEnvironment::clearCombatObjects(LuaScriptInterface* interface)
{
auto it = combatIdMap.find(interface);
if (it == combatIdMap.end()) {
return;
}
for (uint32_t id : it->second) {
auto itt = combatMap.find(id);
if (itt != combatMap.end()) {
combatMap.erase(itt);
}
}
it->second.clear();
}
AreaCombat* LuaEnvironment::getAreaObject(uint32_t id) const
{
auto it = areaMap.find(id);
if (it == areaMap.end()) {
return nullptr;
}
return it->second;
}
uint32_t LuaEnvironment::createAreaObject(LuaScriptInterface* interface)
{
areaMap[++lastAreaId] = new AreaCombat;
areaIdMap[interface].push_back(lastAreaId);
return lastAreaId;
}
void LuaEnvironment::clearAreaObjects(LuaScriptInterface* interface)
{
auto it = areaIdMap.find(interface);
if (it == areaIdMap.end()) {
return;
}
for (uint32_t id : it->second) {
auto itt = areaMap.find(id);
if (itt != areaMap.end()) {
delete itt->second;
areaMap.erase(itt);
}
}
it->second.clear();
}
void LuaEnvironment::executeTimerEvent(uint32_t eventIndex)
{
auto it = timerEvents.find(eventIndex);
if (it == timerEvents.end()) {
return;
}
LuaTimerEventDesc timerEventDesc = std::move(it->second);
timerEvents.erase(it);
//push function
lua_rawgeti(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
//push parameters
for (auto parameter : boost::adaptors::reverse(timerEventDesc.parameters)) {
lua_rawgeti(luaState, LUA_REGISTRYINDEX, parameter);
}
//call the function
if (reserveScriptEnv()) {
ScriptEnvironment* env = getScriptEnv();
env->setTimerEvent();
env->setScriptId(timerEventDesc.scriptId, this);
callFunction(timerEventDesc.parameters.size());
} else {
std::cout << "[Error - LuaScriptInterface::executeTimerEvent] Call stack overflow" << std::endl;
}
//free resources
luaL_unref(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
for (auto parameter : timerEventDesc.parameters) {
luaL_unref(luaState, LUA_REGISTRYINDEX, parameter);
}
}
| 1 | 19,363 | I think you need to change line 928 value from 6 to 7 too, since now table will have 7 values | otland-forgottenserver | cpp |
@@ -126,3 +126,14 @@ def test_split_into_nhot_long(seed, st):
assert set(f1.names) == set(fr.names)
f1 = f1[..., fr.names]
assert f1.to_list() == fr.to_list()
+
+
+def test_split_into_nhot_view():
+ f0 = dt.Frame(A=["cat,dog,mouse", "mouse", None, "dog, cat"])
+ f1 = dt.split_into_nhot(f0[::-1, :])
+ f2 = dt.split_into_nhot(f0[3, :])
+ assert set(f1.names) == {"cat", "dog", "mouse"}
+ assert f1[:, ["cat", "dog", "mouse"]].to_list() == \
+ [[1, 0, 0, 1], [1, 0, 0, 1], [0, 0, 1, 1]]
+ assert set(f2.names) == {"cat", "dog"}
+ assert f2[:, ["cat", "dog"]].to_list() == [[1], [1]] | 1 | #!/usr/bin/env python
#-------------------------------------------------------------------------------
# Copyright 2018 H2O.ai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
import datatable as dt
import pytest
import random
from datatable import stype
#-------------------------------------------------------------------------------
# split_into_nhot
#-------------------------------------------------------------------------------
def test_split_into_nhot0():
f0 = dt.Frame(["cat, dog, mouse, peacock, frog",
"armadillo, fox, hedgehog",
"dog, fox, mouse, cat, peacock",
"horse, raccoon, cat, frog, dog"])
f1 = dt.split_into_nhot(f0)
f1.internal.check()
fr = dt.Frame({"cat": [1, 0, 1, 1],
"dog": [1, 0, 1, 1],
"mouse": [1, 0, 1, 0],
"peacock": [1, 0, 1, 0],
"frog": [1, 0, 0, 1],
"armadillo": [0, 1, 0, 0],
"fox": [0, 1, 1, 0],
"hedgehog": [0, 1, 0, 0],
"horse": [0, 0, 0, 1],
"raccoon": [0, 0, 0, 1]})
assert set(f1.names) == set(fr.names)
fr = fr[:, f1.names]
assert f1.names == fr.names
assert f1.stypes == (dt.stype.bool8, ) * f1.ncols
assert f1.shape == fr.shape
assert f1.to_list() == fr.to_list()
def test_split_into_nhot1():
f0 = dt.Frame([" meow \n",
"[ meow]",
"['meow' ,purr]",
'(\t"meow", \'purr\')',
"{purr}"])
f1 = dt.split_into_nhot(f0)
f1.internal.check()
fr = dt.Frame(meow=[1, 1, 1, 1, 0], purr=[0, 0, 1, 1, 1])
assert set(f1.names) == set(fr.names)
fr = fr[..., f1.names]
assert f1.shape == fr.shape == (5, 2)
assert f1.stypes == (dt.stype.bool8, dt.stype.bool8)
assert f1.to_list() == fr.to_list()
def test_split_into_nhot_sep():
f0 = dt.Frame(["a|b|c", "b|a", "a|c"])
f1 = dt.split_into_nhot(f0, sep="|")
assert set(f1.names) == {"a", "b", "c"}
fr = dt.Frame(a=[1, 1, 1], b=[1, 1, 0], c=[1, 0, 1])
assert set(f1.names) == set(fr.names)
assert f1.to_list() == fr[:, f1.names].to_list()
def test_split_into_nhot_quotes():
f0 = dt.split_into_nhot(dt.Frame(['foo, "bar, baz"']))
f1 = dt.split_into_nhot(dt.Frame(['foo, "bar, baz']))
assert set(f0.names) == {"foo", "bar, baz"}
assert set(f1.names) == {"foo", '"bar', "baz"}
def test_split_into_nhot_bad():
f0 = dt.Frame([[1.25], ["foo"], ["bar"]])
with pytest.raises(ValueError) as e:
dt.split_into_nhot(f0)
assert ("Function split_into_nhot() may only be applied to a single-column "
"Frame of type string; got frame with 3 columns" == str(e.value))
with pytest.raises(TypeError) as e:
dt.split_into_nhot(f0[:, 0])
assert ("Function split_into_nhot() may only be applied to a single-column "
"Frame of type string; received a column of type float64" ==
str(e.value))
with pytest.raises(ValueError) as e:
dt.split_into_nhot(f0[:, 1], sep=",;-")
assert ("Parameter `sep` in split_into_nhot() must be a single character"
in str(e.value))
@pytest.mark.parametrize("seed, st", [(random.getrandbits(32), stype.str32),
(random.getrandbits(32), stype.str64)])
def test_split_into_nhot_long(seed, st):
random.seed(seed)
n = int(random.expovariate(0.0001) + 100)
col1 = [random.getrandbits(1) for _ in range(n)]
col2 = [random.getrandbits(1) for _ in range(n)]
col3 = [random.getrandbits(1) for _ in range(n)]
col4 = [0] * n
data = [",".join(["liberty"] * col1[i] +
["equality"] * col2[i] +
["justice"] * col3[i]) for i in range(n)]
i = random.randint(0, n - 1)
col4[i] = 1
data[i] += ", freedom"
f0 = dt.Frame(data, stype=st)
assert f0.stypes == (st,)
assert f0.shape == (n, 1)
f1 = dt.split_into_nhot(f0)
assert f1.shape == (n, 4)
fr = dt.Frame(liberty=col1, equality=col2, justice=col3, freedom=col4)
assert set(f1.names) == set(fr.names)
f1 = f1[..., fr.names]
assert f1.to_list() == fr.to_list()
| 1 | 11,420 | Does `f0[::-1, :]` mean all the rows taken in a reverse order? | h2oai-datatable | py |
@@ -30,7 +30,7 @@ public class JavaProcessJobTest
" By JULIE BOSMAN \n" +
"Published: August 11, 2010 \n" +
" \n" +
- "Twelve years later, it may be Joe Fox�s turn to worry. Readers have gone from skipping small \n" +
+ "Twelve years later, it may be Joe Fox���s turn to worry. Readers have gone from skipping small \n" +
"bookstores to wondering if they need bookstores at all. More people are ordering books online \n" +
"or plucking them from the best-seller bin at Wal-Mart";
| 1 | package azkaban.test.jobExecutor;
import java.io.IOException;
import java.util.Date;
import java.util.Properties;
import org.apache.log4j.Logger;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import azkaban.utils.Props;
import azkaban.jobExecutor.AbstractProcessJob;
import azkaban.jobExecutor.JavaProcessJob;
import azkaban.jobExecutor.ProcessJob;
public class JavaProcessJobTest
{
private JavaProcessJob job = null;
// private JobDescriptor descriptor = null;
private Props props = null;
private Logger log = Logger.getLogger(JavaProcessJob.class);
private static String classPaths ;
private static final String inputContent =
"Quick Change in Strategy for a Bookseller \n" +
" By JULIE BOSMAN \n" +
"Published: August 11, 2010 \n" +
" \n" +
"Twelve years later, it may be Joe Fox�s turn to worry. Readers have gone from skipping small \n" +
"bookstores to wondering if they need bookstores at all. More people are ordering books online \n" +
"or plucking them from the best-seller bin at Wal-Mart";
private static final String errorInputContent =
inputContent + "\n stop_here " +
"But the threat that has the industry and some readers the most rattled is the growth of e-books. \n" +
" In the first five months of 2009, e-books made up 2.9 percent of trade book sales. In the same period \n" +
"in 2010, sales of e-books, which generally cost less than hardcover books, grew to 8.5 percent, according \n" +
"to the Association of American Publishers, spurred by sales of the Amazon Kindle and the new Apple iPad. \n" +
"For Barnes & Noble, long the largest and most powerful bookstore chain in the country, the new competition \n" +
"has led to declining profits and store traffic.";
private static String inputFile ;
private static String errorInputFile ;
private static String outputFile ;
@BeforeClass
public static void init() {
// get the classpath
Properties prop = System.getProperties();
classPaths = String.format("'%s'", prop.getProperty("java.class.path", null));
long time = (new Date()).getTime();
inputFile = "/tmp/azkaban_input_" + time;
errorInputFile = "/tmp/azkaban_input_error_" + time;
outputFile = "/tmp/azkaban_output_" + time;
// dump input files
try {
Utils.dumpFile(inputFile, inputContent);
Utils.dumpFile(errorInputFile, errorInputContent);
}
catch (IOException e) {
e.printStackTrace(System.err);
Assert.fail("error in creating input file:" + e.getLocalizedMessage());
}
}
@AfterClass
public static void cleanup() {
// remove the input file and error input file
Utils.removeFile(inputFile);
Utils.removeFile(errorInputFile);
//Utils.removeFile(outputFile);
}
@Before
public void setUp() {
/* initialize job */
// descriptor = EasyMock.createMock(JobDescriptor.class);
props = new Props();
props.put(AbstractProcessJob.WORKING_DIR, ".");
props.put("type", "java");
props.put("fullPath", ".");
// EasyMock.expect(descriptor.getId()).andReturn("java").times(1);
// EasyMock.expect(descriptor.getProps()).andReturn(props).times(1);
// EasyMock.expect(descriptor.getFullPath()).andReturn(".").times(1);
//
// EasyMock.replay(descriptor);
job = new JavaProcessJob("testJavaProcess", props, props, log);
// EasyMock.verify(descriptor);
}
@Test
public void testJavaJob() throws Exception {
/* initialize the Props */
props.put(JavaProcessJob.JOB_CLASS, "azkaban.test.jobExecutor.WordCountLocal");
props.put(ProcessJob.WORKING_DIR, ".");
props.put("input", inputFile);
props.put("output", outputFile);
props.put("classpath", classPaths);
job.run();
}
@Test
public void testJavaJobHashmap() throws Exception {
/* initialize the Props */
props.put(JavaProcessJob.JOB_CLASS, "azkaban.test.executor.SleepJavaJob");
props.put("seconds", 1);
props.put(ProcessJob.WORKING_DIR, ".");
props.put("input", inputFile);
props.put("output", outputFile);
props.put("classpath", classPaths);
job.run();
}
@Test
public void testFailedJavaJob() throws Exception {
props.put(JavaProcessJob.JOB_CLASS, "azkaban.test.jobExecutor.WordCountLocal");
props.put(ProcessJob.WORKING_DIR, ".");
props.put("input", errorInputFile);
props.put("output", outputFile);
props.put("classpath", classPaths);
try {
job.run();
}
catch (RuntimeException e) {
Assert.assertTrue(true);
}
}
}
| 1 | 9,638 | Looks like there are strange characters here. | azkaban-azkaban | java |
@@ -97,7 +97,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
status.Experiment.Phase = v1alpha1.ExperimentPhaseFinished
status.FailedMessage = emptyString
- } else if chaos.IsPaused() {
+ } else if chaos.GetPause() != "" {
if status.Experiment.Phase == v1alpha1.ExperimentPhaseRunning {
r.Log.Info("Pausing")
| 1 | // Copyright 2019 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package twophase
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
"k8s.io/client-go/util/retry"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/reconciler"
"github.com/chaos-mesh/chaos-mesh/pkg/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const emptyString = ""
// Reconciler for the twophase reconciler
type Reconciler struct {
reconciler.InnerReconciler
client.Client
client.Reader
Log logr.Logger
}
// NewReconciler would create reconciler for twophase controller
func NewReconciler(r reconciler.InnerReconciler, client client.Client, reader client.Reader, log logr.Logger) *Reconciler {
return &Reconciler{
InnerReconciler: r,
Client: client,
Reader: reader,
Log: log,
}
}
// Reconcile is twophase reconcile implement
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
var err error
now := time.Now()
r.Log.Info("Reconciling a two phase chaos", "name", req.Name, "namespace", req.Namespace)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_chaos := r.Object()
if err = r.Client.Get(ctx, req.NamespacedName, _chaos); err != nil {
r.Log.Error(err, "unable to get chaos")
return ctrl.Result{}, err
}
chaos := _chaos.(v1alpha1.InnerSchedulerObject)
duration, err := chaos.GetDuration()
if err != nil {
r.Log.Error(err, "failed to get chaos duration")
return ctrl.Result{}, err
}
scheduler := chaos.GetScheduler()
if scheduler == nil {
r.Log.Info("Scheduler should be defined currently")
return ctrl.Result{}, fmt.Errorf("misdefined scheduler")
}
if duration == nil {
zero := 0 * time.Second
duration = &zero
}
status := chaos.GetStatus()
if chaos.IsDeleted() {
// This chaos was deleted
r.Log.Info("Removing self")
err = r.Recover(ctx, req, chaos)
if err != nil {
r.Log.Error(err, "failed to recover chaos")
updateFailedMessage(ctx, r, chaos, err.Error())
return ctrl.Result{Requeue: true}, err
}
status.Experiment.Phase = v1alpha1.ExperimentPhaseFinished
status.FailedMessage = emptyString
} else if chaos.IsPaused() {
if status.Experiment.Phase == v1alpha1.ExperimentPhaseRunning {
r.Log.Info("Pausing")
err = r.Recover(ctx, req, chaos)
if err != nil {
r.Log.Error(err, "failed to pause chaos")
updateFailedMessage(ctx, r, chaos, err.Error())
return ctrl.Result{Requeue: true}, err
}
now := time.Now()
status.Experiment.EndTime = &metav1.Time{
Time: now,
}
if status.Experiment.StartTime != nil {
status.Experiment.Duration = now.Sub(status.Experiment.StartTime.Time).String()
}
}
status.Experiment.Phase = v1alpha1.ExperimentPhasePaused
status.FailedMessage = emptyString
} else if !chaos.GetNextRecover().IsZero() && chaos.GetNextRecover().Before(now) {
// Start recover
r.Log.Info("Recovering")
// Don't need to recover again if chaos was paused before
if status.Experiment.Phase != v1alpha1.ExperimentPhasePaused {
if err = r.Recover(ctx, req, chaos); err != nil {
r.Log.Error(err, "failed to recover chaos")
updateFailedMessage(ctx, r, chaos, err.Error())
return ctrl.Result{Requeue: true}, err
}
}
chaos.SetNextRecover(time.Time{})
status.Experiment.EndTime = &metav1.Time{
Time: time.Now(),
}
status.Experiment.Phase = v1alpha1.ExperimentPhaseWaiting
status.FailedMessage = emptyString
} else if (status.Experiment.Phase == v1alpha1.ExperimentPhaseFailed ||
status.Experiment.Phase == v1alpha1.ExperimentPhasePaused) &&
!chaos.GetNextRecover().IsZero() && chaos.GetNextRecover().After(now) {
// Only resume/retry chaos in the case when current round is not finished,
// which means the current time is before recover time. Otherwise we
// don't resume the chaos and just wait for the start of next round.
r.Log.Info("Resuming/Retrying")
dur := chaos.GetNextRecover().Sub(now)
if err = applyAction(ctx, r, req, dur, chaos); err != nil {
updateFailedMessage(ctx, r, chaos, err.Error())
return ctrl.Result{Requeue: true}, err
}
status.FailedMessage = emptyString
} else if chaos.GetNextStart().Before(now) {
r.Log.Info("Starting")
tempStart, err := utils.NextTime(*chaos.GetScheduler(), now)
if err != nil {
r.Log.Error(err, "failed to calculate the start time")
updateFailedMessage(ctx, r, chaos, err.Error())
return ctrl.Result{}, err
}
tempRecover := now.Add(*duration)
if tempStart.Before(tempRecover) {
err := fmt.Errorf("nextRecover shouldn't be later than nextStart")
r.Log.Error(err, "Then recover can never be reached.", "scheduler", *chaos.GetScheduler(), "duration", *duration)
updateFailedMessage(ctx, r, chaos, err.Error())
return ctrl.Result{}, err
}
if err = applyAction(ctx, r, req, *duration, chaos); err != nil {
updateFailedMessage(ctx, r, chaos, err.Error())
return ctrl.Result{Requeue: true}, err
}
nextStart, err := utils.NextTime(*chaos.GetScheduler(), status.Experiment.StartTime.Time)
if err != nil {
r.Log.Error(err, "failed to get the next start time")
return ctrl.Result{}, err
}
nextRecover := status.Experiment.StartTime.Time.Add(*duration)
chaos.SetNextStart(*nextStart)
chaos.SetNextRecover(nextRecover)
status.FailedMessage = emptyString
} else {
r.Log.Info("Waiting")
nextStart, err := utils.NextTime(*chaos.GetScheduler(), status.Experiment.StartTime.Time)
if err != nil {
r.Log.Error(err, "failed to get next start time")
return ctrl.Result{}, err
}
nextTime := chaos.GetNextStart()
// if nextStart is not equal to nextTime, the scheduler may have been modified.
// So set nextStart to time.Now.
if nextStart.Equal(nextTime) {
if !chaos.GetNextRecover().IsZero() && chaos.GetNextRecover().Before(nextTime) {
nextTime = chaos.GetNextRecover()
}
duration := nextTime.Sub(now)
r.Log.Info("Requeue request", "after", duration)
return ctrl.Result{RequeueAfter: duration}, nil
}
chaos.SetNextStart(time.Now())
duration := nextTime.Sub(now)
r.Log.Info("Requeue request", "after", duration)
return ctrl.Result{RequeueAfter: duration}, nil
}
if err := r.Update(ctx, chaos); err != nil {
r.Log.Error(err, "unable to update chaos status")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func applyAction(
ctx context.Context,
r *Reconciler,
req ctrl.Request,
duration time.Duration,
chaos v1alpha1.InnerSchedulerObject,
) error {
status := chaos.GetStatus()
r.Log.Info("Chaos action:", "chaos", chaos)
// Start to apply action
r.Log.Info("Performing Action")
if err := r.Apply(ctx, req, chaos); err != nil {
r.Log.Error(err, "failed to apply chaos action")
status.Experiment.Phase = v1alpha1.ExperimentPhaseFailed
updateError := retry.RetryOnConflict(retry.DefaultRetry, func() error {
return r.Update(ctx, chaos)
})
if updateError != nil {
r.Log.Error(updateError, "unable to update chaos finalizers")
}
return err
}
status.Experiment.StartTime = &metav1.Time{Time: time.Now()}
status.Experiment.Phase = v1alpha1.ExperimentPhaseRunning
status.Experiment.Duration = duration.String()
return nil
}
func updateFailedMessage(
ctx context.Context,
r *Reconciler,
chaos v1alpha1.InnerSchedulerObject,
err string,
) {
status := chaos.GetStatus()
status.FailedMessage = err
if err := r.Update(ctx, chaos); err != nil {
r.Log.Error(err, "unable to update chaos status")
}
}
| 1 | 17,320 | It is better to have a check to ensure the format is valid. | chaos-mesh-chaos-mesh | go |
@@ -22,14 +22,16 @@ namespace AutoRest.Go.TemplateModels
private readonly string lroDescription = " This method may poll for completion. Polling can be canceled by passing the cancel channel argument. " +
"The channel will be used to cancel polling and any outstanding HTTP requests.";
+ public readonly bool NextAlreadyDefined;
- public MethodTemplateModel(Method source, string owner, string packageName, MethodScopeProvider methodScope)
+ public MethodTemplateModel(Method source, string owner, string packageName, MethodScopeProvider methodScope, bool next)
{
this.LoadFrom(source);
MethodScope = methodScope;
Owner = owner;
PackageName = packageName;
+ NextAlreadyDefined = next;
var parameter = Parameters.Find(p => p.Type.IsPrimaryType(KnownPrimaryType.Stream)
&& !(p.Location == ParameterLocation.Body || p.Location == ParameterLocation.FormData)); | 1 | // Copyright (c) Microsoft Open Technologies, Inc. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Net;
using AutoRest.Core.ClientModel;
using AutoRest.Go.Properties;
using AutoRest.Core.Utilities;
namespace AutoRest.Go.TemplateModels
{
public class MethodTemplateModel : Method
{
public readonly MethodScopeProvider MethodScope;
public readonly string Owner;
public readonly string PackageName;
private readonly string lroDescription = " This method may poll for completion. Polling can be canceled by passing the cancel channel argument. " +
"The channel will be used to cancel polling and any outstanding HTTP requests.";
public MethodTemplateModel(Method source, string owner, string packageName, MethodScopeProvider methodScope)
{
this.LoadFrom(source);
MethodScope = methodScope;
Owner = owner;
PackageName = packageName;
var parameter = Parameters.Find(p => p.Type.IsPrimaryType(KnownPrimaryType.Stream)
&& !(p.Location == ParameterLocation.Body || p.Location == ParameterLocation.FormData));
if (parameter != null)
{
throw new ArgumentException(string.Format(CultureInfo.InvariantCulture,
Resources.IllegalStreamingParameter, parameter.Name));
}
if (string.IsNullOrEmpty(Description))
{
Description = string.Format("sends the {0} request.", ScopedName.ToPhrase());
}
if (this.IsLongRunningOperation())
{
Description += lroDescription;
}
}
public Dictionary<string,string> DefaultValueMap
{
get
{
var defaultValueMap = new Dictionary<string, string>();
foreach(var p in Parameters)
{
if (p.IsConstant && p.ClientProperty == null)
{
defaultValueMap.Add(p.Name, p.DefaultValue);
}
}
return defaultValueMap;
}
}
private string _scopedName;
public string ScopedName
{
get
{
if (string.IsNullOrEmpty(_scopedName))
{
_scopedName = MethodScope.GetMethodName(Name, Group);
}
return _scopedName;
}
}
public string MethodSignature
{
get
{
return ScopedName + "(" + MethodParametersSignature + ")";
}
}
/// <summary>
/// Generate the method parameter declaration.
/// </summary>
public string MethodParametersSignature
{
get
{
List<string> declarations = new List<string>();
LocalParameters
.ForEach(p => declarations.Add(string.Format(
p.IsRequired || p.Type.CanBeEmpty()
? "{0} {1}"
: "{0} *{1}", p.Name, p.Type.Name)));
//for Cancelation channel option for long-running operations
if (this.IsLongRunningOperation())
{
declarations.Add("cancel <-chan struct{}");
}
return string.Join(", ", declarations);
}
}
public string MethodReturnSignature
{
get
{
return !this.IsLongRunningOperation() && this.HasReturnValue()
? string.Format("result {0}, err error", this.ReturnValue().Body.Name)
: "result autorest.Response, err error";
}
}
public string NextMethodName
{
get
{
return ScopedName + "NextResults";
}
}
public string PreparerMethodName
{
get
{
return ScopedName + "Preparer";
}
}
public string SenderMethodName
{
get
{
return ScopedName + "Sender";
}
}
public string ResponderMethodName
{
get
{
return ScopedName + "Responder";
}
}
public string HelperInvocationParameters
{
get
{
List<string> invocationParams = new List<string>();
LocalParameters
.ForEach(p => invocationParams.Add(p.Name));
if (this.IsLongRunningOperation())
{
invocationParams.Add("cancel");
}
return string.Join(", ", invocationParams);
}
}
/// <summary>
/// Return the parameters as they apopear in the method signature excluding global parameters.
/// </summary>
public IEnumerable<Parameter> LocalParameters
{
get
{
return
Parameters.Where(
p => p != null && p.IsMethodArgument() && !string.IsNullOrWhiteSpace(p.Name) && !p.SerializedName.IsApiVersion())
.OrderBy(item => !item.IsRequired);
}
}
public string ParameterValidations
{
get
{
return Parameters.Validate(HttpMethod);
}
}
public Parameter BodyParameter
{
get
{
return Parameters.BodyParameter();
}
}
public IEnumerable<Parameter> FormDataParameters
{
get
{
return Parameters.FormDataParameters();
}
}
public IEnumerable<Parameter> HeaderParameters
{
get
{
return Parameters.HeaderParameters();
}
}
public IEnumerable<Parameter> OptionalHeaderParameters
{
get
{
return Parameters.HeaderParameters(false);
}
}
public IEnumerable<Parameter> PathParameters
{
get
{
return Parameters.PathParameters();
}
}
public string PathMap
{
get
{
return PathParameters.BuildParameterMap("pathParameters");
}
}
public IEnumerable<Parameter> QueryParameters
{
get
{
return Parameters.QueryParameters();
}
}
public IEnumerable<Parameter> OptionalQueryParameters
{
get
{
return Parameters.QueryParameters(false);
}
}
public string QueryMap
{
get
{
return QueryParameters.BuildParameterMap("queryParameters");
}
}
public string FormDataMap
{
get
{
return FormDataParameters.BuildParameterMap("formDataParameters");
}
}
public List<string> ResponseCodes
{
get
{
var codes = new List<string>();
if (!Responses.ContainsKey(HttpStatusCode.OK))
{
codes.Add(GoCodeNamer.StatusCodeToGoString[HttpStatusCode.OK]);
}
foreach (var sc in Responses.Keys)
{
codes.Add(GoCodeNamer.StatusCodeToGoString[sc]);
}
return codes;
}
}
public List<string> PrepareDecorators
{
get
{
var decorators = new List<string>();
if (BodyParameter != null && !BodyParameter.Type.IsPrimaryType(KnownPrimaryType.Stream))
{
decorators.Add("autorest.AsJSON()");
}
decorators.Add(HTTPMethodDecorator);
decorators.Add("autorest.WithBaseURL(client.BaseURI)");
decorators.Add(string.Format(PathParameters.Any()
? "autorest.WithPathParameters(\"{0}\",pathParameters)"
: "autorest.WithPath(\"{0}\")",
Url));
if (BodyParameter != null && BodyParameter.IsRequired)
{
decorators.Add(string.Format(BodyParameter.Type.IsPrimaryType(KnownPrimaryType.Stream) && BodyParameter.Location == ParameterLocation.Body
? "autorest.WithFile({0})"
: "autorest.WithJSON({0})",
BodyParameter.Name));
}
if (QueryParameters.Any())
{
decorators.Add("autorest.WithQueryParameters(queryParameters)");
}
if (FormDataParameters.Any())
{
decorators.Add(
FormDataParameters.Any(p => p.Type.IsPrimaryType(KnownPrimaryType.Stream))
? "autorest.WithMultiPartFormData(formDataParameters)"
: "autorest.WithFormData(autorest.MapToValues(formDataParameters))"
);
}
if (RequestHeaders.Any())
{
foreach (var param in Parameters.Where(p => p.IsRequired && p.Location == ParameterLocation.Header))
{
decorators.Add(string.Format("autorest.WithHeader(\"{0}\",autorest.String({1}))",
param.SerializedName, param.Name));
}
}
return decorators;
}
}
public string HTTPMethodDecorator
{
get
{
switch (HttpMethod)
{
case HttpMethod.Delete: return "autorest.AsDelete()";
case HttpMethod.Get: return "autorest.AsGet()";
case HttpMethod.Head: return "autorest.AsHead()";
case HttpMethod.Options: return "autorest.AsOptions()";
case HttpMethod.Patch: return "autorest.AsPatch()";
case HttpMethod.Post: return "autorest.AsPost()";
case HttpMethod.Put: return "autorest.AsPut()";
default:
throw new ArgumentException(string.Format("The HTTP verb {0} is not supported by the Go SDK", HttpMethod));
}
}
}
public List<string> RespondDecorators
{
get
{
var decorators = new List<string>();
decorators.Add("client.ByInspecting()");
decorators.Add(string.Format("azure.WithErrorUnlessStatusCode({0})", string.Join(",", ResponseCodes.ToArray())));
if (!this.IsLongRunningOperation() && this.HasReturnValue() && !this.ReturnValue().Body.IsStreamType())
{
if (this.ReturnValue().Body is SyntheticType)
{
decorators.Add("autorest.ByUnmarshallingJSON(&result.Value)");
}
else
{
decorators.Add("autorest.ByUnmarshallingJSON(&result)");
}
}
if (!this.HasReturnValue() || !this.ReturnValue().Body.IsStreamType())
{
decorators.Add("autorest.ByClosing()");
}
return decorators;
}
}
public string Response
{
get
{
return !this.IsLongRunningOperation() && this.HasReturnValue()
? "result.Response = autorest.Response{Response: resp}"
: "result.Response = resp";
}
}
public string AutorestError(string phase, string response = null, string parameter = null)
{
return !string.IsNullOrEmpty(parameter)
? string.Format("autorest.NewErrorWithError(err, \"{0}.{1}\", \"{2}\", nil , \"{3}\'{4}\'\")", PackageName, Owner, ScopedName, phase, parameter)
: string.IsNullOrEmpty(response)
? string.Format("autorest.NewErrorWithError(err, \"{0}.{1}\", \"{2}\", nil , \"{3}\")", PackageName, Owner, ScopedName, phase)
: string.Format("autorest.NewErrorWithError(err, \"{0}.{1}\", \"{2}\", {3}, \"{4}\")", PackageName, Owner, ScopedName, response, phase);
}
}
}
| 1 | 23,107 | How about `nextAlreadyDefined` variable name in method signature? | Azure-autorest | java |
@@ -459,7 +459,7 @@ func (s *clientIntegrationSuite) TestClientDataConverter_WithChild() {
// to ensure custom data converter is used, this number might be different if client changed.
d := dc.(*testDataConverter)
- s.Equal(3, d.NumOfCallToPayloads)
+ s.Equal(2, d.NumOfCallToPayloads)
s.Equal(2, d.NumOfCallFromPayloads)
}
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package host
import (
"bytes"
"context"
"encoding/gob"
"errors"
"flag"
"fmt"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
historypb "go.temporal.io/api/history/v1"
"go.temporal.io/sdk/activity"
sdkclient "go.temporal.io/sdk/client"
"go.temporal.io/sdk/converter"
"go.temporal.io/sdk/temporal"
"go.temporal.io/sdk/worker"
"go.temporal.io/sdk/workflow"
"go.temporal.io/server/api/adminservice/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/rpc"
)
type (
clientIntegrationSuite struct {
// override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test,
// not merely log an error
*require.Assertions
IntegrationBase
hostPort string
sdkClient sdkclient.Client
worker worker.Worker
taskQueue string
}
)
var (
ErrEncodingIsNotSet = errors.New("payload encoding metadata is not set")
ErrEncodingIsNotSupported = errors.New("payload encoding is not supported")
)
func TestClientIntegrationSuite(t *testing.T) {
flag.Parse()
suite.Run(t, new(clientIntegrationSuite))
}
func (s *clientIntegrationSuite) SetupSuite() {
s.setupSuite("testdata/clientintegrationtestcluster.yaml")
s.hostPort = "127.0.0.1:7134"
if TestFlags.FrontendAddr != "" {
s.hostPort = TestFlags.FrontendAddr
}
}
func (s *clientIntegrationSuite) TearDownSuite() {
s.tearDownSuite()
}
func (s *clientIntegrationSuite) SetupTest() {
// Have to define our overridden assertions in the test setup. If we did it earlier, s.T() will return nil
s.Assertions = require.New(s.T())
sdkClient, err := sdkclient.NewClient(sdkclient.Options{
HostPort: s.hostPort,
Namespace: s.namespace,
ConnectionOptions: sdkclient.ConnectionOptions{
DisableHealthCheck: true,
},
})
if err != nil {
s.Logger.Fatal("Error when creating SDK client", tag.Error(err))
}
s.sdkClient = sdkClient
s.taskQueue = s.randomizeStr("tq")
s.worker = worker.New(s.sdkClient, s.taskQueue, worker.Options{})
workflowFn := func(ctx workflow.Context) error {
s.Logger.Fatal("Should not reach here")
return nil
}
activityFn := func(ctx context.Context) error {
s.Logger.Fatal("Should not reach here")
return nil
}
// register dummy workflow and activity, otherwise worker won't start.
s.worker.RegisterWorkflow(workflowFn)
s.worker.RegisterActivity(activityFn)
if err := s.worker.Start(); err != nil {
s.Logger.Fatal("Error when start worker", tag.Error(err))
}
}
func (s *clientIntegrationSuite) TearDownTest() {
s.worker.Stop()
s.sdkClient.Close()
}
// testDataConverter implements encoded.DataConverter using gob
type testDataConverter struct {
NumOfCallToPayloads int // for testing to know testDataConverter is called as expected
NumOfCallFromPayloads int
}
func (tdc *testDataConverter) ToPayloads(values ...interface{}) (*commonpb.Payloads, error) {
tdc.NumOfCallToPayloads++
result := &commonpb.Payloads{}
for i, value := range values {
p, err := tdc.ToPayload(value)
if err != nil {
return nil, fmt.Errorf(
"args[%d], %T: %w", i, value, err)
}
result.Payloads = append(result.Payloads, p)
}
return result, nil
}
func (tdc *testDataConverter) FromPayloads(payloads *commonpb.Payloads, valuePtrs ...interface{}) error {
tdc.NumOfCallFromPayloads++
for i, p := range payloads.GetPayloads() {
err := tdc.FromPayload(p, valuePtrs[i])
if err != nil {
return fmt.Errorf("args[%d]: %w", i, err)
}
}
return nil
}
func (tdc *testDataConverter) ToPayload(value interface{}) (*commonpb.Payload, error) {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
if err := enc.Encode(value); err != nil {
return nil, err
}
p := &commonpb.Payload{
Metadata: map[string][]byte{
"encoding": []byte("gob"),
},
Data: buf.Bytes(),
}
return p, nil
}
func (tdc *testDataConverter) FromPayload(payload *commonpb.Payload, valuePtr interface{}) error {
encoding, ok := payload.GetMetadata()["encoding"]
if !ok {
return ErrEncodingIsNotSet
}
e := string(encoding)
if e != "gob" {
return ErrEncodingIsNotSupported
}
return decodeGob(payload, valuePtr)
}
func (tdc *testDataConverter) ToStrings(payloads *commonpb.Payloads) []string {
var result []string
for _, p := range payloads.GetPayloads() {
result = append(result, tdc.ToString(p))
}
return result
}
func decodeGob(payload *commonpb.Payload, valuePtr interface{}) error {
dec := gob.NewDecoder(bytes.NewBuffer(payload.GetData()))
return dec.Decode(valuePtr)
}
func (tdc *testDataConverter) ToString(payload *commonpb.Payload) string {
encoding, ok := payload.GetMetadata()["encoding"]
if !ok {
return ErrEncodingIsNotSet.Error()
}
e := string(encoding)
if e != "gob" {
return ErrEncodingIsNotSupported.Error()
}
var value interface{}
err := decodeGob(payload, &value)
if err != nil {
return err.Error()
}
return fmt.Sprintf("%+v", value)
}
func newTestDataConverter() converter.DataConverter {
return &testDataConverter{}
}
func testActivity(ctx context.Context, msg string) (string, error) {
return "hello_" + msg, nil
}
func testDataConverterWorkflow(ctx workflow.Context, tl string) (string, error) {
ao := workflow.ActivityOptions{
ScheduleToStartTimeout: 20 * time.Second,
StartToCloseTimeout: 40 * time.Second,
}
ctx = workflow.WithActivityOptions(ctx, ao)
var result string
err := workflow.ExecuteActivity(ctx, testActivity, "world").Get(ctx, &result)
if err != nil {
return "", err
}
// use another converter to run activity,
// with new taskQueue so that worker with same data converter can properly process tasks.
var result1 string
ctx1 := workflow.WithDataConverter(ctx, newTestDataConverter())
ctx1 = workflow.WithTaskQueue(ctx1, tl)
err1 := workflow.ExecuteActivity(ctx1, testActivity, "world1").Get(ctx1, &result1)
if err1 != nil {
return "", err1
}
return result + "," + result1, nil
}
func (s *clientIntegrationSuite) startWorkerWithDataConverter(tl string, dataConverter converter.DataConverter) (sdkclient.Client, worker.Worker) {
sdkClient, err := sdkclient.NewClient(sdkclient.Options{
HostPort: s.hostPort,
Namespace: s.namespace,
DataConverter: dataConverter,
ConnectionOptions: sdkclient.ConnectionOptions{
DisableHealthCheck: true,
},
})
if err != nil {
s.Logger.Fatal("Error when creating SDK client", tag.Error(err))
}
worker := worker.New(sdkClient, tl, worker.Options{})
worker.RegisterActivity(testActivity)
worker.RegisterWorkflow(testChildWorkflow)
if err := worker.Start(); err != nil {
s.Logger.Fatal("Error when start worker with data converter", tag.Error(err))
}
return sdkClient, worker
}
func (s *clientIntegrationSuite) TestClientDataConverter() {
tl := "client-integration-data-converter-activity-taskqueue"
dc := newTestDataConverter()
sdkClient, worker := s.startWorkerWithDataConverter(tl, dc)
defer func() {
worker.Stop()
sdkClient.Close()
}()
id := "client-integration-data-converter-workflow"
workflowOptions := sdkclient.StartWorkflowOptions{
ID: id,
TaskQueue: s.taskQueue,
WorkflowRunTimeout: time.Minute,
}
ctx, cancel := rpc.NewContextWithTimeoutAndHeaders(time.Minute)
defer cancel()
s.worker.RegisterWorkflow(testDataConverterWorkflow)
s.worker.RegisterActivity(testActivity)
we, err := s.sdkClient.ExecuteWorkflow(ctx, workflowOptions, testDataConverterWorkflow, tl)
if err != nil {
s.Logger.Fatal("Start workflow with err", tag.Error(err))
}
s.NotNil(we)
s.True(we.GetRunID() != "")
var res string
err = we.Get(ctx, &res)
s.NoError(err)
s.Equal("hello_world,hello_world1", res)
// to ensure custom data converter is used, this number might be different if client changed.
d := dc.(*testDataConverter)
s.Equal(1, d.NumOfCallToPayloads)
s.Equal(1, d.NumOfCallFromPayloads)
}
func (s *clientIntegrationSuite) TestClientDataConverter_Failed() {
tl := "client-integration-data-converter-activity-failed-taskqueue"
sdkClient, worker := s.startWorkerWithDataConverter(tl, nil) // mismatch of data converter
defer func() {
worker.Stop()
sdkClient.Close()
}()
id := "client-integration-data-converter-failed-workflow"
workflowOptions := sdkclient.StartWorkflowOptions{
ID: id,
TaskQueue: s.taskQueue,
WorkflowRunTimeout: time.Minute,
}
ctx, cancel := rpc.NewContextWithTimeoutAndHeaders(time.Minute)
defer cancel()
s.worker.RegisterWorkflow(testDataConverterWorkflow)
s.worker.RegisterActivity(testActivity)
we, err := s.sdkClient.ExecuteWorkflow(ctx, workflowOptions, testDataConverterWorkflow, tl)
if err != nil {
s.Logger.Fatal("Start workflow with err", tag.Error(err))
}
s.NotNil(we)
s.True(we.GetRunID() != "")
var res string
err = we.Get(ctx, &res)
s.Error(err)
// Get history to make sure only the 2nd activity is failed because of mismatch of data converter
iter := s.sdkClient.GetWorkflowHistory(ctx, id, we.GetRunID(), false, 0)
completedAct := 0
failedAct := 0
for iter.HasNext() {
event, err := iter.Next()
s.NoError(err)
if event.GetEventType() == enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED {
completedAct++
}
if event.GetEventType() == enumspb.EVENT_TYPE_ACTIVITY_TASK_FAILED {
failedAct++
s.NotNil(event.GetActivityTaskFailedEventAttributes().GetFailure().GetApplicationFailureInfo())
s.True(strings.HasPrefix(event.GetActivityTaskFailedEventAttributes().GetFailure().GetMessage(), "unable to decode the activity function input payload with error"))
}
}
s.Equal(1, completedAct)
s.Equal(1, failedAct)
}
var childTaskQueue = "client-integration-data-converter-child-taskqueue"
func testParentWorkflow(ctx workflow.Context) (string, error) {
logger := workflow.GetLogger(ctx)
execution := workflow.GetInfo(ctx).WorkflowExecution
childID := fmt.Sprintf("child_workflow:%v", execution.RunID)
cwo := workflow.ChildWorkflowOptions{
WorkflowID: childID,
WorkflowRunTimeout: time.Minute,
}
ctx = workflow.WithChildOptions(ctx, cwo)
var result string
err := workflow.ExecuteChildWorkflow(ctx, testChildWorkflow, 0, 3).Get(ctx, &result)
if err != nil {
logger.Error("Parent execution received child execution failure", "error", err)
return "", err
}
childID1 := fmt.Sprintf("child_workflow1:%v", execution.RunID)
cwo1 := workflow.ChildWorkflowOptions{
WorkflowID: childID1,
WorkflowRunTimeout: time.Minute,
TaskQueue: childTaskQueue,
}
ctx1 := workflow.WithChildOptions(ctx, cwo1)
ctx1 = workflow.WithDataConverter(ctx1, newTestDataConverter())
var result1 string
err1 := workflow.ExecuteChildWorkflow(ctx1, testChildWorkflow, 0, 2).Get(ctx1, &result1)
if err1 != nil {
logger.Error("Parent execution received child execution 1 failure", "error", err1)
return "", err1
}
res := fmt.Sprintf("Complete child1 %s times, complete child2 %s times", result, result1)
logger.Info("Parent execution completed", "Result", res)
return res, nil
}
func testChildWorkflow(ctx workflow.Context, totalCount, runCount int) (string, error) {
logger := workflow.GetLogger(ctx)
logger.Info("Child workflow execution started")
if runCount <= 0 {
logger.Error("Invalid valid for run count", "RunCount", runCount)
return "", errors.New("invalid run count")
}
totalCount++
runCount--
if runCount == 0 {
result := fmt.Sprintf("Child workflow execution completed after %v runs", totalCount)
logger.Info("Child workflow completed", "Result", result)
return strconv.Itoa(totalCount), nil
}
logger.Info("Child workflow starting new run", "RunCount", runCount, "TotalCount", totalCount)
return "", workflow.NewContinueAsNewError(ctx, testChildWorkflow, totalCount, runCount)
}
func (s *clientIntegrationSuite) TestClientDataConverter_WithChild() {
dc := newTestDataConverter()
sdkClient, worker := s.startWorkerWithDataConverter(childTaskQueue, dc)
defer func() {
worker.Stop()
sdkClient.Close()
}()
id := "client-integration-data-converter-with-child-workflow"
workflowOptions := sdkclient.StartWorkflowOptions{
ID: id,
TaskQueue: s.taskQueue,
WorkflowRunTimeout: time.Minute,
}
ctx, cancel := rpc.NewContextWithTimeoutAndHeaders(time.Minute)
defer cancel()
s.worker.RegisterWorkflow(testParentWorkflow)
s.worker.RegisterWorkflow(testChildWorkflow)
we, err := s.sdkClient.ExecuteWorkflow(ctx, workflowOptions, testParentWorkflow)
if err != nil {
s.Logger.Fatal("Start workflow with err", tag.Error(err))
}
s.NotNil(we)
s.True(we.GetRunID() != "")
var res string
err = we.Get(ctx, &res)
s.NoError(err)
s.Equal("Complete child1 3 times, complete child2 2 times", res)
// to ensure custom data converter is used, this number might be different if client changed.
d := dc.(*testDataConverter)
s.Equal(3, d.NumOfCallToPayloads)
s.Equal(2, d.NumOfCallFromPayloads)
}
func (s *clientIntegrationSuite) Test_ActivityTimeouts() {
activityFn := func(ctx context.Context) error {
info := activity.GetInfo(ctx)
if info.ActivityID == "Heartbeat" {
go func() {
for i := 0; i < 4; i++ {
activity.RecordHeartbeat(ctx, i)
time.Sleep(500 * time.Millisecond)
}
}()
}
time.Sleep(5 * time.Second)
return nil
}
var err1, err2, err3, err4 error
workflowFn := func(ctx workflow.Context) error {
noRetryPolicy := &temporal.RetryPolicy{
MaximumAttempts: 1, // disable retry
}
ctx1 := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
ActivityID: "ScheduleToStart",
ScheduleToStartTimeout: 2 * time.Second,
StartToCloseTimeout: 2 * time.Second,
TaskQueue: "NoWorkerTaskQueue",
RetryPolicy: noRetryPolicy,
})
f1 := workflow.ExecuteActivity(ctx1, activityFn)
ctx2 := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
ActivityID: "StartToClose",
ScheduleToStartTimeout: 2 * time.Second,
StartToCloseTimeout: 2 * time.Second,
RetryPolicy: noRetryPolicy,
})
f2 := workflow.ExecuteActivity(ctx2, activityFn)
ctx3 := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
ActivityID: "ScheduleToClose",
ScheduleToCloseTimeout: 2 * time.Second,
StartToCloseTimeout: 3 * time.Second,
RetryPolicy: noRetryPolicy,
})
f3 := workflow.ExecuteActivity(ctx3, activityFn)
ctx4 := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
ActivityID: "Heartbeat",
StartToCloseTimeout: 10 * time.Second,
HeartbeatTimeout: 2 * time.Second,
RetryPolicy: noRetryPolicy,
})
f4 := workflow.ExecuteActivity(ctx4, activityFn)
err1 = f1.Get(ctx1, nil)
err2 = f2.Get(ctx2, nil)
err3 = f3.Get(ctx3, nil)
err4 = f4.Get(ctx4, nil)
return nil
}
s.worker.RegisterActivity(activityFn)
s.worker.RegisterWorkflow(workflowFn)
id := "integration-test-activity-timeouts"
workflowOptions := sdkclient.StartWorkflowOptions{
ID: id,
TaskQueue: s.taskQueue,
WorkflowRunTimeout: 20 * time.Second,
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
workflowRun, err := s.sdkClient.ExecuteWorkflow(ctx, workflowOptions, workflowFn)
if err != nil {
s.Logger.Fatal("Start workflow failed with err", tag.Error(err))
}
s.NotNil(workflowRun)
s.True(workflowRun.GetRunID() != "")
err = workflowRun.Get(ctx, nil)
s.NoError(err)
// verify activity timeout type
s.Error(err1)
activityErr, ok := err1.(*temporal.ActivityError)
s.True(ok)
s.Equal("ScheduleToStart", activityErr.ActivityID())
timeoutErr, ok := activityErr.Unwrap().(*temporal.TimeoutError)
s.True(ok)
s.Equal(enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, timeoutErr.TimeoutType())
s.Error(err2)
activityErr, ok = err2.(*temporal.ActivityError)
s.True(ok)
s.Equal("StartToClose", activityErr.ActivityID())
timeoutErr, ok = activityErr.Unwrap().(*temporal.TimeoutError)
s.True(ok)
s.Equal(enumspb.TIMEOUT_TYPE_START_TO_CLOSE, timeoutErr.TimeoutType())
s.Error(err3)
activityErr, ok = err3.(*temporal.ActivityError)
s.True(ok)
s.Equal("ScheduleToClose", activityErr.ActivityID())
timeoutErr, ok = activityErr.Unwrap().(*temporal.TimeoutError)
s.True(ok)
s.Equal(enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, timeoutErr.TimeoutType())
s.Error(err4)
activityErr, ok = err4.(*temporal.ActivityError)
s.True(ok)
s.Equal("Heartbeat", activityErr.ActivityID())
timeoutErr, ok = activityErr.Unwrap().(*temporal.TimeoutError)
s.True(ok)
s.Equal(enumspb.TIMEOUT_TYPE_HEARTBEAT, timeoutErr.TimeoutType())
s.True(timeoutErr.HasLastHeartbeatDetails())
var v int
s.NoError(timeoutErr.LastHeartbeatDetails(&v))
s.Equal(3, v)
//s.printHistory(id, workflowRun.GetRunID())
}
func (s *clientIntegrationSuite) Test_BufferedQuery() {
localActivityFn := func(ctx context.Context) error {
time.Sleep(5 * time.Second) // use local activity sleep to block workflow task to force query to be buffered
return nil
}
wfStarted := sync.WaitGroup{}
wfStarted.Add(1)
workflowFn := func(ctx workflow.Context) error {
wfStarted.Done()
status := "init"
workflow.SetQueryHandler(ctx, "foo", func() (string, error) {
return status, nil
})
ctx1 := workflow.WithLocalActivityOptions(ctx, workflow.LocalActivityOptions{
ScheduleToCloseTimeout: 10 * time.Second,
})
status = "calling"
f1 := workflow.ExecuteLocalActivity(ctx1, localActivityFn)
status = "waiting"
err1 := f1.Get(ctx1, nil)
status = "done"
workflow.Sleep(ctx, 5*time.Second)
return err1
}
s.worker.RegisterWorkflow(workflowFn)
id := "integration-test-buffered-query"
workflowOptions := sdkclient.StartWorkflowOptions{
ID: id,
TaskQueue: s.taskQueue,
WorkflowRunTimeout: 20 * time.Second,
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
workflowRun, err := s.sdkClient.ExecuteWorkflow(ctx, workflowOptions, workflowFn)
if err != nil {
s.Logger.Fatal("Start workflow failed with err", tag.Error(err))
}
s.NotNil(workflowRun)
s.True(workflowRun.GetRunID() != "")
// wait until first wf task started
wfStarted.Wait()
go func() {
// sleep 2s to make sure DescribeMutableState is called after QueryWorkflow
time.Sleep(2 * time.Second)
// make DescribeMutableState call, which force mutable state to reload from db
s.adminClient.DescribeMutableState(ctx, &adminservice.DescribeMutableStateRequest{
Namespace: s.namespace,
Execution: &commonpb.WorkflowExecution{
WorkflowId: id,
RunId: workflowRun.GetRunID(),
},
})
}()
// this query will be buffered in mutable state because workflow task is in-flight.
encodedQueryResult, err := s.sdkClient.QueryWorkflow(ctx, id, workflowRun.GetRunID(), "foo")
s.NoError(err)
var queryResult string
err = encodedQueryResult.Get(&queryResult)
s.NoError(err)
s.Equal("done", queryResult)
err = workflowRun.Get(ctx, nil)
s.NoError(err)
}
func (s *clientIntegrationSuite) printHistory(workflowID string, runID string) {
iter := s.sdkClient.GetWorkflowHistory(context.Background(), workflowID, runID, false, 0)
history := &historypb.History{}
for iter.HasNext() {
event, err := iter.Next()
s.NoError(err)
history.Events = append(history.Events, event)
}
common.PrettyPrintHistory(history, s.Logger)
}
| 1 | 13,578 | In older SDK versions the data converter was called 3 times but now it is 2. This is because logic was changed deep in the interceptor chains on when values are serialized and encoded and such. Previously we invoked the data converter on results _even if there was an error_ (and most of the time they were nil). Now we don't do such a thing, resulting in fewer calls. | temporalio-temporal | go |
@@ -17,6 +17,7 @@ limitations under the License.
package main
import (
+ "github.com/google/knative-gcp/pkg/reconciler/events/build"
// The following line to load the gcp plugin (only required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
| 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
// The following line to load the gcp plugin (only required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"github.com/google/knative-gcp/pkg/reconciler/deployment"
"github.com/google/knative-gcp/pkg/reconciler/events/auditlogs"
"github.com/google/knative-gcp/pkg/reconciler/events/pubsub"
"github.com/google/knative-gcp/pkg/reconciler/events/scheduler"
"github.com/google/knative-gcp/pkg/reconciler/events/storage"
"github.com/google/knative-gcp/pkg/reconciler/messaging/channel"
kedapullsubscription "github.com/google/knative-gcp/pkg/reconciler/pubsub/pullsubscription/keda"
staticpullsubscription "github.com/google/knative-gcp/pkg/reconciler/pubsub/pullsubscription/static"
"github.com/google/knative-gcp/pkg/reconciler/pubsub/topic"
"knative.dev/pkg/injection/sharedmain"
)
func main() {
sharedmain.Main("controller",
auditlogs.NewController,
storage.NewController,
scheduler.NewController,
pubsub.NewController,
staticpullsubscription.NewController,
kedapullsubscription.NewController,
topic.NewController,
channel.NewController,
deployment.NewController,
)
}
| 1 | 10,733 | formatting... can this import go next to the other sources ones? | google-knative-gcp | go |
@@ -286,10 +286,10 @@ const char *options_list_str =
" -early Requests early injection (the default).\n"
" -late Requests late injection.\n"
# endif
- " -attach <pid> Attach to the process with the given pid. Pass 0\n"
- " for pid to launch and inject into a new process.\n"
" -logdir <dir> Logfiles will be stored in this directory.\n"
# endif
+ " -attach <pid> Attach to the process with the given pid. Pass 0\n"
+ " for pid to launch and inject into a new process.\n"
" -use_dll <dll> Inject given dll instead of configured DR dll.\n"
" -force Inject regardless of configuration.\n"
" -exit0 Return a 0 exit code instead of the app's exit code.\n" | 1 | /* **********************************************************
* Copyright (c) 2011-2018 Google, Inc. All rights reserved.
* Copyright (c) 2008-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* compile with make VMAP=1 for a vmap version (makefile defaults to VMSAFE version) */
#include "configure.h"
#ifdef WINDOWS
# define WIN32_LEAN_AND_MEAN
# define UNICODE
# define _UNICODE
# include <windows.h>
# include <io.h>
# include "config.h"
# include "share.h"
#endif
#ifdef UNIX
# include <errno.h>
# include <fcntl.h>
# include <unistd.h>
# include <sys/stat.h>
# include <sys/mman.h>
# include <sys/wait.h>
#endif
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <ctype.h>
#include "globals_shared.h"
#include "dr_config.h" /* MUST be before share.h (it sets HOT_PATCHING_INTERFACE) */
#include "dr_inject.h"
#include "dr_frontend.h"
typedef enum _action_t {
action_none,
action_nudge,
action_register,
action_unregister,
action_list,
} action_t;
static bool verbose;
static bool quiet;
static bool DR_dll_not_needed = false;
static bool nocheck;
#define die() exit(1)
#define fatal(msg, ...) \
do { \
fprintf(stderr, "ERROR: " msg "\n", ##__VA_ARGS__); \
fflush(stderr); \
exit(1); \
} while (0)
/* up to caller to call die() if necessary */
#define error(msg, ...) \
do { \
fprintf(stderr, "ERROR: " msg "\n", ##__VA_ARGS__); \
fflush(stderr); \
} while (0)
#define warn(msg, ...) \
do { \
if (!quiet) { \
fprintf(stderr, "WARNING: " msg "\n", ##__VA_ARGS__); \
fflush(stderr); \
} \
} while (0)
#define info(msg, ...) \
do { \
if (verbose) { \
fprintf(stderr, "INFO: " msg "\n", ##__VA_ARGS__); \
fflush(stderr); \
} \
} while (0)
#ifdef DRCONFIG
# define TOOLNAME "drconfig"
#elif defined(DRRUN)
# define TOOLNAME "drrun"
#elif defined(DRINJECT)
# define TOOLNAME "drinject"
#endif
const char *usage_str =
#ifdef DRCONFIG
"USAGE: " TOOLNAME " [options]\n"
" or: " TOOLNAME " [options] [-ops \"<DR options>\"] -c <client> [client options]\n"
" or: " TOOLNAME " [options] [-ops \"<DR options>\"] -t <tool> [tool options]\n";
#elif defined(DRRUN) || defined(DRINJECT)
"USAGE: " TOOLNAME " [options] <app and args to run>\n"
" or: " TOOLNAME " [options] -- <app and args to run>\n"
# if defined(DRRUN)
" or: " TOOLNAME " [options] [DR options] -- <app and args to run>\n"
" or: " TOOLNAME " [options] [DR options] -c <client> [client options]"
" -- <app and args to run>\n"
" or: " TOOLNAME " [options] [DR options] -t <tool> [tool options]"
" -- <app and args to run>\n"
# endif
;
#endif
/* Body of the -help output, one option per line; the preprocessor selects the
 * subset applicable to the frontend being built (drconfig/drrun/drinject).
 * Contains one %s placeholder (in the DRRUN -t description) that the usage()
 * macro fills with tool_list.
 */
const char *options_list_str =
    "\n" TOOLNAME " options (these are distinct from DR runtime options):\n"
    " -version Display version information\n"
    " -verbose Display additional information\n"
    " -quiet Do not display warnings\n"
    " -nocheck Do not fail due to invalid DynamoRIO installation or app\n"
#ifdef DRCONFIG
    " -reg <process> Register <process> to run under DR\n"
    " -unreg <process> Unregister <process> from running under DR\n"
    " -isreg <process> Display whether <process> is registered and if so its\n"
    " configuration\n"
# ifdef WINDOWS
    " -list_registered Display all registered processes and their configuration\n"
# endif /* WINDOWS */
#endif /* DRCONFIG */
    " -root <root> DR root directory\n"
#if defined(DRCONFIG) || defined(DRRUN)
# if defined(MF_API) && defined(PROBE_API)
    " -mode <mode> DR mode (code, probe, or security)\n"
# elif defined(PROBE_API)
    " -mode <mode> DR mode (code or probe)\n"
# elif defined(MF_API)
    " -mode <mode> DR mode (code or security)\n"
# else
/* No mode argument, is always code. */
# endif
#endif
#ifdef DRCONFIG
/* FIXME i#840: Syswide NYI on Linux. */
# ifdef WINDOWS
    " -syswide_on Set up systemwide injection so that registered\n"
    " applications will run under DR however they are\n"
    " launched. Otherwise, drinject must be used to\n"
    " launch a target configured application under DR.\n"
    " This option requires administrative privileges.\n"
    " -syswide_off Disable systemwide injection.\n"
    " This option requires administrative privileges.\n"
# endif
    " -global Use global configuration files instead of local\n"
    " user-private configuration files. The global\n"
    " config dir must be set up ahead of time.\n"
    " This option may require administrative privileges.\n"
    " If a local file already exists it will take precedence.\n"
    " -norun Create a configuration that excludes the application\n"
    " from running under DR control. Useful for following\n"
    " all child processes except a handful (blacklist).\n"
#endif
    " -debug Use the DR debug library\n"
    " -32 Target 32-bit or WOW64 applications\n"
    " -64 Target 64-bit (non-WOW64) applications\n"
#if defined(DRCONFIG) || defined(DRRUN)
    "\n"
    " -ops \"<options>\" Specify DR runtime options. When specifying\n"
    " multiple options, enclose the entire list of\n"
    " options in quotes, or repeat the -ops.\n"
    " Alternatively, if the application is separated\n"
    " by \"--\" or if -c or -t is specified, the -ops may be\n"
    " omitted and DR options listed prior to \"--\", -c,\n"
    " and -t, without quotes.\n"
    "\n"
    " -t <toolname> Registers a pre-configured tool to run alongside DR.\n"
    " A tool is a client with a configuration file\n"
    " that sets the client options and path, providing a\n"
    " convenient launching command via this -t parameter.\n"
# ifdef DRRUN
    " Available tools include: %s.\n"
# endif
    "\n"
    " -c <path> <options>*\n"
    " Registers one client to run alongside DR. Assigns\n"
    " the client an id of 0. All remaining arguments\n"
    " until the -- arg before the app are interpreted as\n"
    " client options. Must come after all drrun and DR\n"
    " ops. Incompatible with -client. Requires using --\n"
    " to separate the app executable. Neither the path nor\n"
    " the options may contain semicolon characters or\n"
    " all 3 quote characters (\", \', `).\n"
    "\n"
    " -client <path> <ID> \"<options>\"\n"
    " Use -c instead, unless you need to set the client ID.\n"
    " Registers one or more clients to run alongside DR.\n"
    " This option is only valid when registering a\n"
    " process. The -client option takes three arguments:\n"
    " the full path to a client library, a unique 8-digit\n"
    " hex ID, and an optional list of client options\n"
    " (use \"\" to specify no options). Multiple clients\n"
    " can be installed via multiple -client options. In\n"
    " this case, clients specified first on the command\n"
    " line have higher priority. Neither the path nor\n"
    " the options may contain semicolon characters or\n"
    " all 3 quote characters (\", \', `).\n"
    " This option must precede any options to DynamoRIO.\n"
#endif
#ifdef DRCONFIG
    "\n"
# ifdef WINDOWS
    " Note that nudging 64-bit processes is not yet supported.\n"
    " -nudge <process> <client ID> <argument>\n"
    " Nudge the client with ID <client ID> in all running\n"
    " processes with name <process>, and pass <argument>\n"
    " to the nudge callback. <client ID> must be the\n"
    " 8-digit hex ID of the target client. <argument>\n"
    " should be a hex literal (0, 1, 3f etc.).\n"
    " -nudge_pid <process_id> <client ID> <argument>\n"
    " Nudge the client with ID <client ID> in the process with\n"
    " id <process_id>, and pass <argument> to the nudge\n"
    " callback. <client ID> must be the 8-digit hex ID\n"
    " of the target client. <argument> should be a hex\n"
    " literal (0, 1, 3f etc.).\n"
    " -nudge_all <client ID> <argument>\n"
    " Nudge the client with ID <client ID> in all running\n"
    " processes and pass <argument> to the nudge callback.\n"
    " <client ID> must be the 8-digit hex ID of the target\n"
    " client. <argument> should be a hex literal\n"
    " (0, 1, 3f etc.)\n"
    " -nudge_timeout <ms> Max time (in milliseconds) to wait for a nudge to\n"
    " finish before continuing. The default is an infinite\n"
    " wait. A value of 0 means don't wait for nudges to\n"
    " complete."
# else /* WINDOWS */
/* FIXME i#840: integrate nudgeunix into drconfig on Unix */
    "Note: please use the nudgeunix tool to nudge processes on Unix.\n";
# endif /* !WINDOWS */
#else /* DRCONFIG */
    " -no_wait Return immediately: do not wait for application exit.\n"
    " -s <seconds> Kill the application if it runs longer than the\n"
    " specified number of seconds.\n"
    " -m <minutes> Kill the application if it runs longer than the\n"
    " specified number of minutes.\n"
    " -h <hours> Kill the application if it runs longer than the\n"
    " specified number of hours.\n"
# ifdef UNIX
    " -killpg Create a new process group for the app. If the app\n"
    " times out, kill the entire process group. This forces\n"
    " the child to be a new process with a new pid, rather\n"
    " than reusing the parent's pid.\n"
# endif
    " -stats Print /usr/bin/time-style elapsed time and memory used.\n"
    " -mem Print memory usage statistics.\n"
    " -pidfile <file> Print the pid of the child process to the given file.\n"
    " -no_inject Run the application natively.\n"
    " -static Do not inject under the assumption that the application\n"
    " is statically linked with DynamoRIO. Instead, trigger\n"
    " automated takeover.\n"
# ifdef UNIX /* FIXME i#725: Windows attach NYI */
#  ifndef MACOS /* XXX i#1285: private loader NYI on MacOS */
    " -early Requests early injection (the default).\n"
    " -late Requests late injection.\n"
#  endif
    " -attach <pid> Attach to the process with the given pid. Pass 0\n"
    " for pid to launch and inject into a new process.\n"
    " -logdir <dir> Logfiles will be stored in this directory.\n"
# endif
    " -use_dll <dll> Inject given dll instead of configured DR dll.\n"
    " -force Inject regardless of configuration.\n"
    " -exit0 Return a 0 exit code instead of the app's exit code.\n"
    "\n"
    " <app and args> Application command line to execute under DR.\n"
#endif /* !DRCONFIG */
    ;
/* Returns whether path exists, per the drfront access check; a failed
 * check is treated the same as a missing file.
 */
static bool
does_file_exist(const char *path)
{
    bool exists = false;
    if (drfront_access(path, DRFRONT_EXIST, &exists) != DRFRONT_SUCCESS)
        return false;
    return exists;
}
#if defined(DRRUN) || defined(DRINJECT)
/* Searches the path list in env_var for fname, writing the hit to full_path.
 * On any failure, full_path is cleared to the empty string and false returned.
 */
static bool
search_env(const char *fname, const char *env_var, char *full_path,
           const size_t full_path_size)
{
    bool found = false;
    drfront_status_t res =
        drfront_searchenv(fname, env_var, full_path, full_path_size, &found);
    if (res == DRFRONT_SUCCESS && found)
        return true;
    full_path[0] = '\0';
    return false;
}
#endif
#ifdef UNIX
# ifndef DRCONFIG
/* Windows-API-style shim so shared error-reporting code can print a system
 * error code on both platforms: on UNIX that code is errno.
 */
static int
GetLastError(void)
{
    return errno;
}
# endif /* DRCONFIG */
#endif /* UNIX */
/* Converts src to an absolute path in buf; aborts the frontend if the
 * conversion fails.
 */
static void
get_absolute_path(const char *src, char *buf, size_t buflen /*# elements*/)
{
    drfront_status_t status = drfront_get_absolute_path(src, buf, buflen);
    if (status == DRFRONT_SUCCESS)
        return;
    fatal("failed (status=%d) to convert %s to an absolute path", status, src);
}
/* Opens a filename and mode that are in utf8 */
static FILE *
fopen_utf8(const char *path, const char *mode)
{
#ifdef WINDOWS
    /* Windows needs wide-char strings for _tfopen; returns NULL if either
     * utf8-to-TCHAR conversion fails.
     */
    TCHAR wpath[MAXIMUM_PATH];
    TCHAR wmode[MAXIMUM_PATH];
    if (drfront_char_to_tchar(path, wpath, BUFFER_SIZE_ELEMENTS(wpath)) !=
            DRFRONT_SUCCESS ||
        drfront_char_to_tchar(mode, wmode, BUFFER_SIZE_ELEMENTS(wmode)) !=
            DRFRONT_SUCCESS)
        return NULL;
    return _tfopen(wpath, wmode);
#else
    /* Non-Windows systems take the utf8 bytes directly. */
    return fopen(path, mode);
#endif
}
/* Comma-separated names of the tools found by read_tool_list(); left empty
 * when no package-style tools/list{32,64} file is present.
 */
static char tool_list[MAXIMUM_PATH];
/* Prints the available-tools line to stream (drrun only, and only when a
 * tool list was successfully read).
 */
static void
print_tool_list(FILE *stream)
{
#ifdef DRRUN
    if (tool_list[0] != '\0')
        fprintf(stream, " available tools include: %s\n", tool_list);
#endif
}
/* i#1509: we want to list the available tools for the -t option.
 * Since we don't have a dir iterator we use a list of tools
 * in a text file tools/list{32,64} which we create at
 * install time. Thus we only expect to have it for a package build.
 *
 * Fills the global tool_list with a comma-separated list of tool names,
 * one name per line of the list file. Silently leaves tool_list empty
 * when the file is absent (non-package builds).
 */
static void
read_tool_list(const char *dr_root, dr_platform_t dr_platform)
{
    FILE *f;
    char list_file[MAXIMUM_PATH];
    size_t sofar = 0;
    const char *arch = IF_X64_ELSE("64", "32");
    if (dr_platform == DR_PLATFORM_32BIT)
        arch = "32";
    else if (dr_platform == DR_PLATFORM_64BIT)
        arch = "64";
    _snprintf(list_file, BUFFER_SIZE_ELEMENTS(list_file), "%s/tools/list%s", dr_root,
              arch);
    NULL_TERMINATE_BUFFER(list_file);
    f = fopen_utf8(list_file, "r");
    if (f == NULL) {
        /* no visible error: we only expect to have a list for a package build */
        return;
    }
    while (fgets(tool_list + sofar,
                 (int)(BUFFER_SIZE_ELEMENTS(tool_list) - sofar - 1 /*space*/),
                 f) != NULL) {
        NULL_TERMINATE_BUFFER(tool_list);
        sofar += strlen(tool_list + sofar);
        tool_list[sofar - 1] = ','; /* replace newline with comma */
        /* add space */
        if (sofar < BUFFER_SIZE_ELEMENTS(tool_list))
            tool_list[sofar++] = ' ';
    }
    fclose(f);
    /* Trim the trailing ", ". Guard against an empty list file: previously
     * sofar could be 0 here and tool_list[sofar - 2] wrote before the buffer.
     */
    if (sofar >= 2)
        tool_list[sofar - 2] = '\0';
    NULL_TERMINATE_BUFFER(tool_list);
}
/* Prints the usage banner, an optional error message, and either the full
 * option list (the -help path: list_ops == true, stdout, exit(0)) or a
 * pointer to -help (error path: stderr, die()). Never returns.
 */
#define usage(list_ops, msg, ...)                                                \
    do {                                                                         \
        FILE *stream = (list_ops == true) ? stdout : stderr;                     \
        if ((msg)[0] != '\0')                                                    \
            fprintf(stderr, "ERROR: " msg "\n\n", ##__VA_ARGS__);                \
        fprintf(stream, "%s", usage_str);                                        \
        print_tool_list(stream);                                                 \
        if (list_ops) {                                                          \
            fprintf(stream, options_list_str, tool_list);                        \
            exit(0);                                                             \
        } else {                                                                 \
            fprintf(stream, "Run with -help to see " TOOLNAME " option list\n"); \
        }                                                                        \
        die();                                                                   \
    } while (0)
/* Unregister a process */
bool
unregister_proc(const char *process, process_id_t pid, bool global,
                dr_platform_t dr_platform)
{
    const char *name = process == NULL ? "<null>" : process;
    dr_config_status_t res = dr_unregister_process(process, pid, global, dr_platform);
    switch (res) {
    case DR_PROC_REG_INVALID:
        error("no existing registration for %s", name);
        return false;
    case DR_FAILURE:
        error("unregistration failed for %s", name);
        return false;
    default: return true;
    }
}
/* Check if the provided root directory actually has the files we
 * expect. Returns false only on a fatal problem: the file required by the
 * requested configuration (preinject/debug/release + bitwidth) is missing
 * and -nocheck was not given. Other missing files merely warn, and the
 * function still returns true.
 */
static bool
check_dr_root(const char *dr_root, bool debug, dr_platform_t dr_platform, bool preinject,
              bool report)
{
    int i;
    char buf[MAXIMUM_PATH];
    bool ok = true;
    /* FIXME i#1569: port DynamoRIO to AArch64 so we can enable the check warning */
    bool nowarn = IF_X86_ELSE(false, true);
    const char *checked_files[] = {
#ifdef WINDOWS
        "lib32\\drpreinject.dll", "lib32\\release\\dynamorio.dll",
        "lib32\\debug\\dynamorio.dll", "lib64\\drpreinject.dll",
        "lib64\\release\\dynamorio.dll", "lib64\\debug\\dynamorio.dll"
#elif defined(MACOS)
        "lib32/debug/libdrpreload.dylib", "lib32/debug/libdynamorio.dylib",
        "lib32/release/libdrpreload.dylib", "lib32/release/libdynamorio.dylib",
        "lib64/debug/libdrpreload.dylib", "lib64/debug/libdynamorio.dylib",
        "lib64/release/libdrpreload.dylib", "lib64/release/libdynamorio.dylib"
#else /* LINUX */
        /* With early injection the default, we don't require preload to exist. */
        "lib32/debug/libdynamorio.so", "lib32/release/libdynamorio.so",
        "lib64/debug/libdynamorio.so", "lib64/release/libdynamorio.so"
#endif
    };
    const char *arch = IF_X64_ELSE("lib64", "lib32");
    if (dr_platform == DR_PLATFORM_32BIT)
        arch = "lib32";
    else if (dr_platform == DR_PLATFORM_64BIT)
        arch = "lib64";
    if (DR_dll_not_needed) {
        /* assume user knows what he's doing */
        return true;
    }
    /* don't warn if running from a build dir (i#458) which we attempt to detect
     * by looking for CMakeCache.txt in the root dir
     * (warnings can also be suppressed via -quiet)
     */
    _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s/%s", dr_root, "CMakeCache.txt");
    if (does_file_exist(buf))
        nowarn = true;
    for (i = 0; i < BUFFER_SIZE_ELEMENTS(checked_files); i++) {
        _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s/%s", dr_root, checked_files[i]);
        if (!does_file_exist(buf)) {
            ok = false;
            /* Fatal only when this file matches the requested configuration:
             * the right preinject/debug/release flavor AND the right bitwidth.
             */
            if (!nocheck &&
                ((preinject && strstr(checked_files[i], "drpreinject")) ||
                 (!preinject && debug && strstr(checked_files[i], "debug") != NULL) ||
                 (!preinject && !debug && strstr(checked_files[i], "release") != NULL)) &&
                strstr(checked_files[i], arch) != NULL) {
                /* We don't want to create a .1config file that won't be freed
                 * b/c the core is never injected
                 */
                if (report) {
                    error("cannot find required file %s\n"
                          "Use -root to specify a proper DynamoRIO root directory.",
                          buf);
                }
                return false;
            } else {
                if (strstr(checked_files[i], arch) == NULL) {
                    /* Support a single-bitwidth package. */
                    ok = true;
                } else if (!nowarn)
                    warn("cannot find %s: is this an incomplete installation?", buf);
            }
        }
    }
    if (!ok && !nowarn)
        warn("%s does not appear to be a valid DynamoRIO root", dr_root);
    return true;
}
/* Register a process to run under DR.
 * Validates the root directory and mode, replaces any existing registration
 * for the process, and on failure reports the most likely cause (missing
 * config-dir env vars on Windows, config-dir permissions, etc.).
 */
bool
register_proc(const char *process, process_id_t pid, bool global, const char *dr_root,
              const dr_operation_mode_t dr_mode, bool debug, dr_platform_t dr_platform,
              const char *extra_ops)
{
    dr_config_status_t status;
    assert(dr_root != NULL);
    if (!does_file_exist(dr_root)) {
        error("cannot access DynamoRIO root directory %s", dr_root);
        return false;
    }
#ifdef CLIENT_INTERFACE
    if (dr_mode == DR_MODE_NONE) {
        error("you must provide a DynamoRIO mode");
        return false;
    }
#endif
    /* warn if the DR root directory doesn't look right, unless -norun,
     * in which case don't bother
     */
    if (dr_mode != DR_MODE_DO_NOT_RUN &&
        !check_dr_root(dr_root, debug, dr_platform, false /*!pre*/, true /*report*/))
        return false;
    if (dr_process_is_registered(process, pid, global, dr_platform, NULL, NULL, NULL,
                                 NULL)) {
        /* A re-registration replaces the old one rather than failing. */
        warn("overriding existing registration");
        if (!unregister_proc(process, pid, global, dr_platform))
            return false;
    }
    status = dr_register_process(process, pid, global, dr_root, dr_mode, debug,
                                 dr_platform, extra_ops);
    if (status != DR_SUCCESS) {
        /* USERPROFILE is not set by default over cygwin ssh */
        char buf[MAXIMUM_PATH];
#ifdef WINDOWS
        if (drfront_get_env_var("USERPROFILE", buf, BUFFER_SIZE_ELEMENTS(buf)) ==
                DRFRONT_ERROR &&
            drfront_get_env_var("DYNAMORIO_CONFIGDIR", buf, BUFFER_SIZE_ELEMENTS(buf)) ==
                DRFRONT_ERROR) {
            error("process %s registration failed: "
                  "neither USERPROFILE nor DYNAMORIO_CONFIGDIR env var set!",
                  process == NULL ? "<null>" : process);
        } else {
#endif
            if (status == DR_CONFIG_DIR_NOT_FOUND) {
                dr_get_config_dir(global, true /*tmp*/, buf, BUFFER_SIZE_ELEMENTS(buf));
                error("process %s registration failed: check config dir %s permissions",
                      process == NULL ? "<null>" : process, buf);
#ifdef ANDROID
                error("for Android apps, set TMPDIR to /data/data/com.your.app");
#endif
            } else {
                error("process %s registration failed",
                      process == NULL ? "<null>" : process);
            }
#ifdef WINDOWS
        }
#endif
        return false;
    }
    return true;
}
/* Check if the specified client library actually exists. */
void
check_client_lib(const char *client_lib)
{
    if (does_file_exist(client_lib))
        return;
    warn("%s does not exist", client_lib);
}
/* Appends one client (library path, id, options) to the registration of the
 * given process, which must already be registered. Returns whether the
 * client registration succeeded.
 */
bool
register_client(const char *process_name, process_id_t pid, bool global,
                dr_platform_t dr_platform, client_id_t client_id, const char *path,
                const char *options)
{
    const char *print_name = process_name == NULL ? "<null>" : process_name;
    const char *print_path = path == NULL ? "<null>" : path;
    size_t pri;
    dr_config_status_t res;
    if (!dr_process_is_registered(process_name, pid, global, dr_platform, NULL, NULL,
                                  NULL, NULL)) {
        error("can't register client: process %s is not registered", print_name);
        return false;
    }
    check_client_lib(path);
    /* Appending: the new client's priority slots in after existing ones. */
    pri = dr_num_registered_clients(process_name, pid, global, dr_platform);
    info("registering client with id=%d path=|%s| ops=|%s|", client_id, path, options);
    res = dr_register_client(process_name, pid, global, dr_platform, client_id, pri,
                             path, options);
    if (res == DR_SUCCESS)
        return true;
    if (res == DR_CONFIG_STRING_TOO_LONG) {
        error("client %s registration failed: option string too long: \"%s\"",
              print_path, options);
    } else if (res == DR_CONFIG_OPTIONS_INVALID) {
        error("client %s registration failed: options cannot contain ';' or all "
              "3 quote types: %s",
              print_path, options);
    } else {
        error("client %s registration failed with error code %d", print_path, res);
    }
    return false;
}
#if defined(WINDOWS) || defined(DRRUN) || defined(DRCONFIG)
/* Human-readable name for the given platform. In x64 builds the IF_X64
 * macro compiles in the extra test so DR_PLATFORM_DEFAULT also reads as
 * "64-bit"; in 32-bit builds the default falls through to "32-bit/WOW64".
 */
static const char *
platform_name(dr_platform_t platform)
{
    return (platform == DR_PLATFORM_64BIT IF_X64(|| platform == DR_PLATFORM_DEFAULT))
               ? "64-bit"
               : "32-bit/WOW64";
}
#endif
/* FIXME i#840: Port registered process iterator. */
#ifdef WINDOWS
/* Prints one registered process's configuration (root, mode, debug flag,
 * options) followed by each of its registered clients. When name is NULL
 * the next entry is consumed from iter; otherwise the named process's
 * registration is looked up directly.
 */
static void
list_process(char *name, bool global, dr_platform_t platform,
             dr_registered_process_iterator_t *iter)
{
    char name_buf[MAXIMUM_PATH] = { 0 };
    char root_dir_buf[MAXIMUM_PATH] = { 0 };
    dr_operation_mode_t dr_mode;
    bool debug;
    char dr_options[DR_MAX_OPTIONS_LENGTH] = { 0 };
    dr_client_iterator_t *c_iter;
    if (name == NULL) {
        dr_registered_process_iterator_next(iter, name_buf, root_dir_buf, &dr_mode,
                                            &debug, dr_options);
        name = name_buf;
    } else if (!dr_process_is_registered(name, 0, global, platform, root_dir_buf,
                                         &dr_mode, &debug, dr_options)) {
        printf("Process %s not registered for %s\n", name, platform_name(platform));
        return;
    }
    if (dr_mode == DR_MODE_DO_NOT_RUN) {
        printf("Process %s registered to NOT RUN on %s\n", name, platform_name(platform));
    } else {
        printf("Process %s registered for %s\n", name, platform_name(platform));
    }
    printf("\tRoot=\"%s\" Debug=%s\n\tOptions=\"%s\"\n", root_dir_buf,
           debug ? "yes" : "no", dr_options);
    /* Walk and print every client registered for this process. */
    c_iter = dr_client_iterator_start(name, 0, global, platform);
    while (dr_client_iterator_hasnext(c_iter)) {
        client_id_t id;
        size_t client_pri;
        char client_path[MAXIMUM_PATH] = { 0 };
        char client_opts[DR_MAX_OPTIONS_LENGTH] = { 0 };
        dr_client_iterator_next(c_iter, &id, &client_pri, client_path, client_opts);
        printf("\tClient=0x%08x Priority=%d\n\t\tPath=\"%s\"\n\t\tOptions=\"%s\"\n", id,
               (uint)client_pri, client_path, client_opts);
    }
    dr_client_iterator_stop(c_iter);
}
#endif /* WINDOWS */
#ifndef DRCONFIG
/* i#200/PR 459481: communicate child pid via file.
 * Writes the decimal pid plus a newline to pidfile; a failure to open the
 * file is only a warning (best-effort, matching prior behavior).
 */
static void
write_pid_to_file(const char *pidfile, process_id_t pid)
{
    FILE *f = fopen_utf8(pidfile, "w");
    if (f == NULL) {
        /* warn() itself appends a newline; the old trailing "\n" here
         * produced a stray blank line.
         */
        warn("cannot open %s: %d", pidfile, GetLastError());
    } else {
        char pidbuf[16];
        size_t written; /* fwrite returns size_t; was a mismatched ssize_t */
        _snprintf(pidbuf, BUFFER_SIZE_ELEMENTS(pidbuf), "%d\n", pid);
        NULL_TERMINATE_BUFFER(pidbuf);
        written = fwrite(pidbuf, 1, strlen(pidbuf), f);
        assert(written == strlen(pidbuf));
        fclose(f);
    }
}
#endif /* DRCONFIG */
#if defined(DRCONFIG) || defined(DRRUN)
/* Records one client (absolute path, id, option string) in the parallel
 * arrays used at registration time, advancing *num_clients past the new
 * entry.
 */
static void
append_client(const char *client, int id, const char *client_ops,
              char client_paths[MAX_CLIENT_LIBS][MAXIMUM_PATH],
              client_id_t client_ids[MAX_CLIENT_LIBS],
              const char *client_options[MAX_CLIENT_LIBS], size_t *num_clients)
{
    size_t idx = *num_clients;
    /* We support an empty client for native -t usage */
    if (client[0] != '\0') {
        get_absolute_path(client, client_paths[idx],
                          BUFFER_SIZE_ELEMENTS(client_paths[idx]));
        NULL_TERMINATE_BUFFER(client_paths[idx]);
        info("client %d path: %s", (int)idx, client_paths[idx]);
    }
    client_ids[idx] = id;
    client_options[idx] = client_ops;
    *num_clients = idx + 1;
}
#endif
/* Appends a space-separated option string to buf. A space is appended only if
 * the buffer is non-empty. Aborts on buffer overflow. Always null terminates
 * the string.
 * XXX: Use print_to_buffer.
 */
static void
add_extra_option(char *buf, size_t bufsz, size_t *sofar, const char *fmt, ...)
{
    ssize_t len;
    va_list ap;
    if (*sofar > 0 && *sofar < bufsz)
        buf[(*sofar)++] = ' '; /* Add a space. */
    va_start(ap, fmt);
    len = vsnprintf(buf + *sofar, bufsz - *sofar, fmt, ap);
    va_end(ap);
    /* vsnprintf returns the would-be length, so truncation occurred iff
     * len >= the space remaining after *sofar. The old check compared
     * against the full bufsz, letting truncated appends through once
     * *sofar > 0 and then advancing *sofar past the end of buf.
     */
    if (len < 0 || (size_t)len >= bufsz - *sofar) {
        error("option string too long, buffer overflow");
        die();
    }
    *sofar += len;
    /* be paranoid: though usually many calls in a row and could delay until end */
    buf[bufsz - 1] = '\0';
}
#if defined(DRCONFIG) || defined(DRRUN)
/* Returns the path to the client library. Appends to extra_ops.
 * A tool config file must contain one of these line types:
 *   CLIENT_ABS=<absolute path to client>
 *   CLIENT_REL=<path to client relative to DR root>
 * It can contain as many DR_OP= lines as desired. Each must contain
 * one DynamoRIO option token:
 *   DR_OP=<DR option token>
 * It can also contain TOOL_OP= lines for tool options, though normally
 * tool default options should just be set in the tool:
 *   TOOL_OP=<tool option token>
 * We take one token per line rather than a string of options to avoid
 * having to do any string parsing.
 * DR ops go last (thus, user can't override); tool ops go first.
 *
 * We also support tools with their own frontend launcher via the following
 * tool config file lines:
 *   FRONTEND_ABS=<absolute path to frontend>
 *   FRONTEND_REL=<path to frontend relative to DR root>
 * If either is present, drrun will launch the frontend and pass it the
 * tool options followed by the app and its options.
 * The path to DR can be included in the frontend options via this line:
 *   TOOL_OP_DR_PATH
 * The options to DR can be included in a single token, preceded by a prefix,
 * via this line:
 *   TOOL_OP_DR_BUNDLE=<prefix>
 *
 * A notification message can be presented to the user with:
 *   USER_NOTICE=This tool is currently experimental. Please report issues to <url>.
 *
 * Returns true iff a client (or frontend) path was found.
 */
static bool
read_tool_file(const char *toolname, const char *dr_root, dr_platform_t dr_platform,
               char *client, size_t client_size, char *ops, size_t ops_size,
               size_t *ops_sofar, char *tool_ops, size_t tool_ops_size,
               size_t *tool_ops_sofar, char *native_path OUT, size_t native_path_size)
{
    FILE *f;
    char config_file[MAXIMUM_PATH];
    char line[MAXIMUM_PATH];
    bool found_client = false;
    const char *arch = IF_X64_ELSE("64", "32");
    if (dr_platform == DR_PLATFORM_32BIT)
        arch = "32";
    else if (dr_platform == DR_PLATFORM_64BIT)
        arch = "64";
    /* Config files are per-bitwidth: tools/<name>.drrun{32,64}. */
    _snprintf(config_file, BUFFER_SIZE_ELEMENTS(config_file), "%s/tools/%s.drrun%s",
              dr_root, toolname, arch);
    NULL_TERMINATE_BUFFER(config_file);
    info("reading tool config file %s", config_file);
    f = fopen_utf8(config_file, "r");
    if (f == NULL) {
        error("cannot find tool config file %s", config_file);
        return false;
    }
    while (fgets(line, BUFFER_SIZE_ELEMENTS(line), f) != NULL) {
        ssize_t len;
        NULL_TERMINATE_BUFFER(line);
        /* Strip trailing newline (and \r for Windows-format files). */
        len = strlen(line) - 1;
        while (len >= 0 && (line[len] == '\n' || line[len] == '\r')) {
            line[len] = '\0';
            len--;
        }
        /* strstr(line, prefix) == line is a starts-with test. */
        if (line[0] == '#') {
            continue;
        } else if (strstr(line, "CLIENT_REL=") == line) {
            _snprintf(client, client_size, "%s/%s", dr_root,
                      line + strlen("CLIENT_REL="));
            client[client_size - 1] = '\0';
            found_client = true;
            /* In frontend mode the client path is forwarded as a tool op. */
            if (native_path[0] != '\0') {
                add_extra_option(tool_ops, tool_ops_size, tool_ops_sofar, "\"%s\"",
                                 client);
            }
        } else if (strstr(line, "CLIENT_ABS=") == line) {
            strncpy(client, line + strlen("CLIENT_ABS="), client_size);
            found_client = true;
            if (native_path[0] != '\0') {
                add_extra_option(tool_ops, tool_ops_size, tool_ops_sofar, "\"%s\"",
                                 client);
            }
        } else if (strstr(line, "DR_OP=") == line) {
            if (strcmp(line, "DR_OP=") != 0) {
                add_extra_option(ops, ops_size, ops_sofar, "\"%s\"",
                                 line + strlen("DR_OP="));
            }
        } else if (strstr(line, "TOOL_OP=") == line) {
            if (strcmp(line, "TOOL_OP=") != 0) {
                add_extra_option(tool_ops, tool_ops_size, tool_ops_sofar, "\"%s\"",
                                 line + strlen("TOOL_OP="));
            }
# ifdef DRRUN /* native only supported for drrun */
        } else if (strstr(line, "FRONTEND_ABS=") == line) {
            _snprintf(native_path, native_path_size, "%s",
                      line + strlen("FRONTEND_ABS="));
            native_path[native_path_size - 1] = '\0';
            found_client = true;
        } else if (strstr(line, "FRONTEND_REL=") == line) {
            _snprintf(native_path, native_path_size, "%s/%s", dr_root,
                      line + strlen("FRONTEND_REL="));
            native_path[native_path_size - 1] = '\0';
            found_client = true;
        } else if (strstr(line, "TOOL_OP_DR_PATH") == line) {
            add_extra_option(tool_ops, tool_ops_size, tool_ops_sofar, "\"%s\"", dr_root);
        } else if (strstr(line, "TOOL_OP_DR_BUNDLE=") == line) {
            if (strcmp(line, "TOOL_OP_DR_BUNDLE=") != 0) {
                add_extra_option(tool_ops, tool_ops_size, tool_ops_sofar, "%s `%s`",
                                 line + strlen("TOOL_OP_DR_BUNDLE="), ops);
            }
# else
        /* drconfig rejects frontend-style lines outright. */
        } else if (strstr(line, "FRONTEND_ABS=") == line ||
                   strstr(line, "FRONTEND_REL=") == line ||
                   strstr(line, "TOOL_OP_DR_PATH") == line ||
                   strstr(line, "TOOL_OP_DR_BUNDLE=") == line) {
            usage(false, "this tool's config only works with drrun, not drconfig");
            return false;
# endif
        } else if (strstr(line, "USER_NOTICE=") == line) {
            warn("%s", line + strlen("USER_NOTICE="));
        } else if (line[0] != '\0') {
            error("tool config file is malformed: unknown line %s", line);
            return false;
        }
    }
    fclose(f);
    return found_client;
}
#endif /* DRCONFIG || DRRUN */
#ifdef DRRUN
/* This parser modifies the string, adding nulls to split it up in place.
 * Caller should continue iterating until *token == NULL.
 */
static char *
split_option_token(char *s, char **token OUT, bool split)
{
    char endquote = '\0';
    if (s == NULL) {
        *token = NULL;
        return NULL;
    }
    /* Skip leading whitespace before the token. */
    while (*s != '\0' && isspace(*s))
        s++;
    /* An opening ", ', or ` switches to quote-delimited scanning. */
    if (*s == '\"' || *s == '\'' || *s == '`')
        endquote = *s++;
    *token = (*s == '\0') ? NULL : s;
    if (endquote != '\0') {
        while (*s != '\0' && *s != endquote)
            s++;
    } else {
        while (*s != '\0' && !isspace(*s))
            s++;
    }
    if (*s == '\0')
        return NULL;
    if (endquote != '\0' && !split)
        s++; /* step over the closing quote when not splitting */
    if (split && *s != '\0')
        *s++ = '\0'; /* terminate the token in place */
    return s;
}
/* Builds the argv for launching a tool's own frontend in place of DR:
 * [native_tool, <tool options...>, "--", <app argv...>, NULL].
 * Caller must free() the returned argv array.
 * This routine writes to tool_ops (the second split pass inserts nulls).
 */
static const char **
switch_to_native_tool(const char **app_argv, const char *native_tool, char *tool_ops)
{
    const char **new_argv, **arg;
    char *s, *token;
    uint count, i;
    /* First pass: count app args and tool-option tokens so we can size
     * the array; the non-mutating split leaves tool_ops intact.
     */
    for (arg = app_argv, count = 0; *arg != NULL; arg++, count++)
        ; /* empty */
    for (s = split_option_token(tool_ops, &token, false /*do not mutate*/); token != NULL;
         s = split_option_token(s, &token, false /*do not mutate*/)) {
        count++;
    }
    count++; /* for native_tool path */
    count++; /* for "--" */
    count++; /* for NULL */
    new_argv = (const char **)malloc(count * sizeof(char *));
    i = 0;
    new_argv[i++] = native_tool;
    /* Second pass: split for real, pointing entries into tool_ops. */
    for (s = split_option_token(tool_ops, &token, true); token != NULL;
         s = split_option_token(s, &token, true)) {
        new_argv[i++] = token;
    }
    new_argv[i++] = "--";
    for (arg = app_argv; *arg != NULL; arg++)
        new_argv[i++] = *arg;
    new_argv[i++] = NULL;
    /* Both passes must agree on the token count. */
    assert(i == count);
    if (verbose) {
        char buf[MAXIMUM_PATH * 2];
        char *c = buf;
        for (i = 0; i < count - 1; i++) {
            ssize_t len = _snprintf(c, BUFFER_SIZE_ELEMENTS(buf) - (c - buf), " \"%s\"",
                                    new_argv[i]);
            if (len < 0 || (size_t)len >= BUFFER_SIZE_ELEMENTS(buf) - (c - buf))
                break;
            c += len;
        }
        NULL_TERMINATE_BUFFER(buf);
        info("native tool cmdline: %s", buf);
    }
    return new_argv;
}
#endif /* DRRUN */
int
_tmain(int argc, TCHAR *targv[])
{
char *dr_root = NULL;
char client_paths[MAX_CLIENT_LIBS][MAXIMUM_PATH];
#if defined(DRCONFIG) || defined(DRRUN)
char *process = NULL;
const char *client_options[MAX_CLIENT_LIBS] = {
NULL,
};
client_id_t client_ids[MAX_CLIENT_LIBS] = {
0,
};
size_t num_clients = 0;
char single_client_ops[DR_MAX_OPTIONS_LENGTH];
#endif
#ifndef DRINJECT
# if defined(MF_API) || defined(PROBE_API)
/* must set -mode */
dr_operation_mode_t dr_mode = DR_MODE_NONE;
# else
/* only one choice so no -mode */
# ifdef CLIENT_INTERFACE
dr_operation_mode_t dr_mode = DR_MODE_CODE_MANIPULATION;
# else
dr_operation_mode_t dr_mode = DR_MODE_NONE;
# endif
# endif
#endif /* !DRINJECT */
char extra_ops[MAX_OPTIONS_STRING];
size_t extra_ops_sofar = 0;
#ifdef DRCONFIG
action_t action = action_none;
#endif
bool use_debug = false;
dr_platform_t dr_platform = DR_PLATFORM_DEFAULT;
#ifdef WINDOWS
/* FIXME i#840: Implement nudges on Linux. */
bool nudge_all = false;
process_id_t nudge_pid = 0;
client_id_t nudge_id = 0;
uint64 nudge_arg = 0;
bool list_registered = false;
uint nudge_timeout = INFINITE;
bool syswide_on = false;
bool syswide_off = false;
#endif /* WINDOWS */
bool global = false;
int exitcode;
#if defined(DRRUN) || defined(DRINJECT)
char *pidfile = NULL;
bool showstats = false;
bool showmem = false;
bool force_injection = false;
bool inject = true;
int limit = 0; /* in seconds */
char *drlib_path = NULL;
# ifdef WINDOWS
time_t start_time, end_time;
# else
bool use_ptrace = false;
bool kill_group = false;
# endif
char *app_name = NULL;
char full_app_name[MAXIMUM_PATH];
const char **app_argv;
char custom_dll[MAXIMUM_PATH];
int errcode;
void *inject_data;
bool success;
bool exit0 = false;
#endif
int i;
#ifndef DRINJECT
size_t j;
#endif
char buf[MAXIMUM_PATH];
char default_root[MAXIMUM_PATH];
char *c;
#if defined(DRCONFIG) || defined(DRRUN)
char native_tool[MAXIMUM_PATH];
#endif
#ifdef DRRUN
void *tofree = NULL;
bool configure = true;
#endif
char **argv;
drfront_status_t sc;
#if defined(WINDOWS) && !defined(_UNICODE)
# error _UNICODE must be defined
#else
/* Convert to UTF-8 if necessary */
sc = drfront_convert_args((const TCHAR **)targv, &argv, argc);
if (sc != DRFRONT_SUCCESS)
fatal("failed to process args: %d", sc);
#endif
memset(client_paths, 0, sizeof(client_paths));
extra_ops[0] = '\0';
#if defined(DRCONFIG) || defined(DRRUN)
native_tool[0] = '\0';
#endif
/* default root: we assume this tool is in <root>/bin{32,64}/dr*.exe */
get_absolute_path(argv[0], buf, BUFFER_SIZE_ELEMENTS(buf));
NULL_TERMINATE_BUFFER(buf);
c = buf + strlen(buf) - 1;
while (*c != '\\' && *c != '/' && c > buf)
c--;
_snprintf(c + 1, BUFFER_SIZE_ELEMENTS(buf) - (c + 1 - buf), "..");
NULL_TERMINATE_BUFFER(buf);
get_absolute_path(buf, default_root, BUFFER_SIZE_ELEMENTS(default_root));
NULL_TERMINATE_BUFFER(default_root);
dr_root = default_root;
info("default root: %s", default_root);
/* we re-read the tool list if the root or platform change */
read_tool_list(dr_root, dr_platform);
/* parse command line */
for (i = 1; i < argc; i++) {
/* params with no arg */
if (strcmp(argv[i], "-verbose") == 0 || strcmp(argv[i], "-v") == 0) {
verbose = true;
continue;
} else if (strcmp(argv[i], "-quiet") == 0) {
quiet = true;
continue;
} else if (strcmp(argv[i], "-nocheck") == 0) {
nocheck = true;
continue;
} else if (strcmp(argv[i], "-debug") == 0) {
use_debug = true;
continue;
} else if (!strcmp(argv[i], "-version")) {
#if defined(BUILD_NUMBER) && defined(VERSION_NUMBER)
printf(TOOLNAME " version %s -- build %d\n", STRINGIFY(VERSION_NUMBER),
BUILD_NUMBER);
#elif defined(BUILD_NUMBER)
printf(TOOLNAME " custom build %d -- %s\n", BUILD_NUMBER, __DATE__);
#else
printf(TOOLNAME " custom build -- %s, %s\n", __DATE__, __TIME__);
#endif
exit(0);
}
#ifdef DRCONFIG
# ifdef WINDOWS
/* FIXME i#840: These are NYI for Linux. */
else if (!strcmp(argv[i], "-list_registered")) {
action = action_list;
list_registered = true;
continue;
} else if (strcmp(argv[i], "-syswide_on") == 0) {
syswide_on = true;
continue;
} else if (strcmp(argv[i], "-syswide_off") == 0) {
syswide_off = true;
continue;
}
# endif
else if (strcmp(argv[i], "-global") == 0) {
global = true;
continue;
} else if (strcmp(argv[i], "-norun") == 0) {
dr_mode = DR_MODE_DO_NOT_RUN;
continue;
}
#endif
else if (strcmp(argv[i], "-32") == 0) {
dr_platform = DR_PLATFORM_32BIT;
read_tool_list(dr_root, dr_platform);
continue;
} else if (strcmp(argv[i], "-64") == 0) {
dr_platform = DR_PLATFORM_64BIT;
read_tool_list(dr_root, dr_platform);
continue;
}
#if defined(DRRUN) || defined(DRINJECT)
else if (strcmp(argv[i], "-stats") == 0) {
showstats = true;
continue;
} else if (strcmp(argv[i], "-mem") == 0) {
showmem = true;
continue;
} else if (strcmp(argv[i], "-no_inject") == 0 ||
/* support old drinjectx param name */
strcmp(argv[i], "-noinject") == 0 || strcmp(argv[i], "-static") == 0) {
DR_dll_not_needed = true;
inject = false;
continue;
} else if (strcmp(argv[i], "-force") == 0) {
force_injection = true;
continue;
} else if (strcmp(argv[i], "-no_wait") == 0) {
limit = -1;
continue;
}
# ifdef UNIX
else if (strcmp(argv[i], "-use_ptrace") == 0) {
/* Undocumented option for using ptrace on a fresh process. */
use_ptrace = true;
continue;
} else if (strcmp(argv[i], "-attach") == 0) {
const char *pid_str = argv[++i];
process_id_t pid = strtoul(pid_str, NULL, 10);
if (pid == ULONG_MAX)
usage(false, "-attach expects an integer pid");
if (pid != 0)
usage(false, "attaching to running processes is not yet implemented");
use_ptrace = true;
/* FIXME: use pid below to attach. */
continue;
}
# ifndef MACOS /* XXX i#1285: private loader NYI on MacOS */
else if (strcmp(argv[i], "-early") == 0) {
/* Now the default: left here just for back-compat */
continue;
} else if (strcmp(argv[i], "-late") == 0) {
/* Appending -no_early_inject to extra_ops communicates our intentions
* to drinjectlib.
*/
add_extra_option(extra_ops, BUFFER_SIZE_ELEMENTS(extra_ops), &extra_ops_sofar,
"-no_early_inject");
continue;
}
# endif
# endif /* UNIX */
else if (strcmp(argv[i], "-exit0") == 0) {
exit0 = true;
continue;
}
#endif
else if (strcmp(argv[i], "-help") == 0 || strcmp(argv[i], "--help") == 0 ||
strcmp(argv[i], "-h") == 0) {
usage(true, "" /* no error msg */);
continue;
}
/* all other flags have an argument -- make sure it exists */
else if (argv[i][0] == '-' && i == argc - 1) {
usage(false, "invalid arguments");
}
/* params with an arg */
if (strcmp(argv[i], "-root") == 0 ||
/* support -dr_home alias used by script */
strcmp(argv[i], "-dr_home") == 0) {
dr_root = argv[++i];
read_tool_list(dr_root, dr_platform);
} else if (strcmp(argv[i], "-logdir") == 0) {
/* Accept this for compatibility with the old drrun shell script. */
const char *dir = argv[++i];
if (!does_file_exist(dir))
usage(false, "-logdir %s does not exist", dir);
add_extra_option(extra_ops, BUFFER_SIZE_ELEMENTS(extra_ops), &extra_ops_sofar,
"-logdir `%s`", dir);
continue;
}
#ifdef DRCONFIG
else if (strcmp(argv[i], "-reg") == 0) {
if (action != action_none) {
usage(false, "more than one action specified");
}
action = action_register;
process = argv[++i];
} else if (strcmp(argv[i], "-unreg") == 0) {
if (action != action_none) {
usage(false, "more than one action specified");
}
action = action_unregister;
process = argv[++i];
} else if (strcmp(argv[i], "-isreg") == 0) {
if (action != action_none) {
usage(false, "more than one action specified");
}
action = action_list;
process = argv[++i];
}
# ifdef WINDOWS
/* FIXME i#840: Nudge is NYI for Linux. */
else if (strcmp(argv[i], "-nudge_timeout") == 0) {
nudge_timeout = strtoul(argv[++i], NULL, 10);
} else if (strcmp(argv[i], "-nudge") == 0 || strcmp(argv[i], "-nudge_pid") == 0 ||
strcmp(argv[i], "-nudge_all") == 0) {
if (action != action_none) {
usage(false, "more than one action specified");
}
if (i + 2 >= argc || (strcmp(argv[i], "-nudge_all") != 0 && i + 3 >= argc)) {
usage(false, "too few arguments to -nudge");
}
action = action_nudge;
if (strcmp(argv[i], "-nudge") == 0)
process = argv[++i];
else if (strcmp(argv[i], "-nudge_pid") == 0)
nudge_pid = strtoul(argv[++i], NULL, 10);
else
nudge_all = true;
nudge_id = strtoul(argv[++i], NULL, 16);
nudge_arg = _strtoui64(argv[++i], NULL, 16);
}
# endif
#endif
#if defined(DRCONFIG) || defined(DRRUN)
# if defined(MF_API) || defined(PROBE_API)
else if (strcmp(argv[i], "-mode") == 0) {
char *mode_str = argv[++i];
if (dr_mode == DR_MODE_DO_NOT_RUN)
usage(false, "cannot combine -norun with -mode");
if (strcmp(mode_str, "code") == 0) {
dr_mode = DR_MODE_CODE_MANIPULATION;
}
# ifdef MF_API
else if (strcmp(mode_str, "security") == 0) {
dr_mode = DR_MODE_MEMORY_FIREWALL;
}
# endif
# ifdef PROBE_API
else if (strcmp(mode_str, "probe") == 0) {
dr_mode = DR_MODE_PROBE;
}
# endif
else {
usage(false, "unknown mode: %s", mode_str);
}
}
# endif
else if (strcmp(argv[i], "-client") == 0) {
if (num_clients == MAX_CLIENT_LIBS) {
error("Maximum number of clients is %d", MAX_CLIENT_LIBS);
die();
} else {
const char *client;
int id;
const char *ops;
if (i + 3 >= argc) {
usage(false, "too few arguments to -client");
}
/* Support relative client paths: very useful! */
client = argv[++i];
id = strtoul(argv[++i], NULL, 16);
ops = argv[++i];
append_client(client, id, ops, client_paths, client_ids, client_options,
&num_clients);
}
} else if (strcmp(argv[i], "-ops") == 0) {
/* support repeating the option (i#477) */
add_extra_option(extra_ops, BUFFER_SIZE_ELEMENTS(extra_ops), &extra_ops_sofar,
"%s", argv[++i]);
}
#endif
#if defined(DRRUN) || defined(DRINJECT)
else if (strcmp(argv[i], "-pidfile") == 0) {
pidfile = argv[++i];
} else if (strcmp(argv[i], "-use_dll") == 0) {
DR_dll_not_needed = true;
/* Support relative path: very useful! */
get_absolute_path(argv[++i], custom_dll, BUFFER_SIZE_ELEMENTS(custom_dll));
NULL_TERMINATE_BUFFER(custom_dll);
drlib_path = custom_dll;
} else if (strcmp(argv[i], "-s") == 0) {
limit = atoi(argv[++i]);
if (limit <= 0)
usage(false, "invalid time");
} else if (strcmp(argv[i], "-m") == 0) {
limit = atoi(argv[++i]) * 60;
if (limit <= 0)
usage(false, "invalid time");
} else if (strcmp(argv[i], "-h") == 0) {
limit = atoi(argv[++i]) * 3600;
if (limit <= 0)
usage(false, "invalid time");
}
# ifdef UNIX
else if (strcmp(argv[i], "-killpg") == 0) {
kill_group = true;
}
# endif
#endif
#if defined(DRCONFIG) || defined(DRRUN)
/* if there are still options, assume user is using -- to separate and pass
* through options to DR. we do not handle mixing DR options with tool
* options: DR must come last. we would need to generate code here from
* optionsx.h to do otherwise, or to sanity check the DR options here.
*/
else if (argv[i][0] == '-') {
while (i < argc) {
if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "-t") == 0 ||
strcmp(argv[i], "--") == 0) {
break;
}
add_extra_option(extra_ops, BUFFER_SIZE_ELEMENTS(extra_ops),
&extra_ops_sofar, "\"%s\"", argv[i]);
i++;
}
if (i < argc && (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "-c") == 0)) {
const char *client;
char client_buf[MAXIMUM_PATH];
size_t client_sofar = 0;
if (i + 1 >= argc)
usage(false, "too few arguments to %s", argv[i]);
if (num_clients != 0)
usage(false, "Cannot use -client with %s.", argv[i]);
client = argv[++i];
single_client_ops[0] = '\0';
if (strcmp(argv[i - 1], "-t") == 0) {
/* Client-requested DR default options come last, so they
* cannot be overridden by DR options passed here.
* The user must use -c or -client to do that.
*/
if (!read_tool_file(client, dr_root, dr_platform, client_buf,
BUFFER_SIZE_ELEMENTS(client_buf), extra_ops,
BUFFER_SIZE_ELEMENTS(extra_ops), &extra_ops_sofar,
single_client_ops,
BUFFER_SIZE_ELEMENTS(single_client_ops),
&client_sofar, native_tool,
BUFFER_SIZE_ELEMENTS(native_tool)))
usage(false, "unknown %s tool \"%s\" requested",
platform_name(dr_platform), client);
client = client_buf;
}
/* Treat everything up to -- or end of argv as client args. */
i++;
while (i < argc && strcmp(argv[i], "--") != 0) {
# ifdef DRCONFIG
if (action == action_none && strcmp(argv[i], "-reg") == 0) {
warn("-reg is taken as a client option!");
}
# endif /* DRCONFIG */
add_extra_option(single_client_ops,
BUFFER_SIZE_ELEMENTS(single_client_ops),
&client_sofar, "\"%s\"", argv[i]);
i++;
}
append_client(client, 0, single_client_ops, client_paths, client_ids,
client_options, &num_clients);
}
if (i < argc && strcmp(argv[i], "--") == 0) {
i++;
goto done_with_options;
}
}
#else /* DRINJECT */
else if (strcmp(argv[i], "--") == 0) {
i++;
goto done_with_options;
}
#endif
else {
#ifdef DRCONFIG
usage(false, "unknown option: %s", argv[i]);
#else
/* start of app and its args */
break;
#endif
}
}
#if defined(DRCONFIG) || defined(DRRUN) || defined(DRINJECT)
done_with_options:
#endif
#if defined(DRRUN) || defined(DRINJECT)
# ifdef DRRUN
/* Support no app if the tool has its own frontend, under the assumption
* it may have post-processing or other features.
*/
if (i < argc || native_tool[0] == '\0') {
# endif
if (i >= argc)
usage(false, "%s", "no app specified");
app_name = argv[i++];
search_env(app_name, "PATH", full_app_name, BUFFER_SIZE_ELEMENTS(full_app_name));
NULL_TERMINATE_BUFFER(full_app_name);
if (full_app_name[0] == '\0') {
/* may need to append .exe, FIXME : other executable types */
char tmp_buf[MAXIMUM_PATH];
_snprintf(tmp_buf, BUFFER_SIZE_ELEMENTS(tmp_buf), "%s%s", app_name, ".exe");
NULL_TERMINATE_BUFFER(tmp_buf);
search_env(tmp_buf, "PATH", full_app_name,
BUFFER_SIZE_ELEMENTS(full_app_name));
}
if (full_app_name[0] == '\0') {
/* last try */
get_absolute_path(app_name, full_app_name,
BUFFER_SIZE_ELEMENTS(full_app_name));
NULL_TERMINATE_BUFFER(full_app_name);
}
if (full_app_name[0] != '\0')
app_name = full_app_name;
info("targeting application: \"%s\"", app_name);
# ifdef DRRUN
}
# endif
/* note that we want target app name as part of cmd line
* (hence &argv[i - 1])
* (FYI: if we were using WinMain, the pzsCmdLine passed in
* does not have our own app name in it)
*/
app_argv = (const char **)&argv[i - 1];
if (verbose) {
c = buf;
for (i = 0; app_argv[i] != NULL; i++) {
c += _snprintf(c, BUFFER_SIZE_ELEMENTS(buf) - (c - buf), " \"%s\"",
app_argv[i]);
}
info("app cmdline: %s", buf);
}
# ifdef DRRUN
if (native_tool[0] != '\0') {
app_name = native_tool;
inject = false;
configure = false;
app_argv = switch_to_native_tool(app_argv, native_tool,
/* this will be changed, but we don't
* need it again
*/
(char *)client_options[0]);
tofree = (void *)app_argv;
}
# endif
#else
if (i < argc)
usage(false, "%s", "invalid extra arguments specified");
#endif
#ifdef WINDOWS
/* FIXME i#900: This doesn't work on Linux, and doesn't do the right thing
* on Windows.
*/
/* PR 244206: set the registry view before any registry access */
set_dr_platform(dr_platform);
#endif
/* support running out of a debug build dir */
if (!use_debug &&
!check_dr_root(dr_root, false, dr_platform, false /*!pre*/, false /*!report*/) &&
check_dr_root(dr_root, true, dr_platform, false /*!pre*/, false /*!report*/)) {
info("debug build directory detected: switching to debug build");
use_debug = true;
}
#ifdef DRCONFIG
if (verbose) {
dr_get_config_dir(global, true /*use temp*/, buf, BUFFER_SIZE_ELEMENTS(buf));
info("configuration directory is \"%s\"", buf);
}
if (action == action_register) {
if (!register_proc(process, 0, global, dr_root, dr_mode, use_debug, dr_platform,
extra_ops))
die();
for (j = 0; j < num_clients; j++) {
if (!register_client(process, 0, global, dr_platform, client_ids[j],
client_paths[j], client_options[j]))
die();
}
} else if (action == action_unregister) {
if (!unregister_proc(process, 0, global, dr_platform))
die();
}
# ifndef WINDOWS
else {
usage(false, "no action specified");
}
# else /* WINDOWS */
/* FIXME i#840: Nudge NYI on Linux. */
else if (action == action_nudge) {
int count = 1;
dr_config_status_t res = DR_SUCCESS;
if (nudge_all)
res = dr_nudge_all(nudge_id, nudge_arg, nudge_timeout, &count);
else if (nudge_pid != 0) {
res = dr_nudge_pid(nudge_pid, nudge_id, nudge_arg, nudge_timeout);
if (res == DR_NUDGE_PID_NOT_INJECTED)
printf("process %d is not running under DR\n", nudge_pid);
if (res != DR_SUCCESS && res != DR_NUDGE_TIMEOUT) {
count = 0;
}
} else
res = dr_nudge_process(process, nudge_id, nudge_arg, nudge_timeout, &count);
printf("%d processes nudged\n", count);
if (res == DR_NUDGE_TIMEOUT)
printf("timed out waiting for nudge to complete\n");
else if (res != DR_SUCCESS)
printf("nudge operation failed, verify permissions and parameters.\n");
}
# ifdef WINDOWS
/* FIXME i#840: Process iterator NYI for Linux. */
else if (action == action_list) {
if (!list_registered)
list_process(process, global, dr_platform, NULL);
else /* list all */ {
dr_registered_process_iterator_t *iter =
dr_registered_process_iterator_start(dr_platform, global);
printf("Registered %s processes for %s\n", global ? "global" : "local",
platform_name(dr_platform));
while (dr_registered_process_iterator_hasnext(iter))
list_process(NULL, global, dr_platform, iter);
dr_registered_process_iterator_stop(iter);
}
}
# endif
else if (!syswide_on && !syswide_off) {
usage(false, "no action specified");
}
if (syswide_on) {
DWORD platform;
if (get_platform(&platform) != ERROR_SUCCESS)
platform = PLATFORM_UNKNOWN;
if (platform >= PLATFORM_WIN_8 &&
IF_X64_ELSE(
dr_platform != DR_PLATFORM_32BIT,
(dr_platform == DR_PLATFORM_64BIT || !is_wow64(GetCurrentProcess())))) {
/* FIXME i#1522: enable AppInit for non-WOW64 on win8+ */
error("syswide_on is not yet supported on Windows 8+ non-WOW64");
die();
}
if (!check_dr_root(dr_root, false, dr_platform, true /*pre*/, true /*report*/))
die();
/* If this is the first setting of AppInit on NT, warn about reboot */
if (!dr_syswide_is_on(dr_platform, dr_root)) {
if (platform == PLATFORM_WIN_NT_4) {
warn("on Windows NT, applications will not be taken over until reboot");
} else if (platform >= PLATFORM_WIN_7) {
/* i#323 will fix this but good to warn the user */
warn("on Windows 7+, syswide_on relaxes system security by removing "
"certain code signing requirements");
}
}
if (dr_register_syswide(dr_platform, dr_root) != ERROR_SUCCESS) {
/* PR 233108: try to give more info on whether a privilege failure */
warn("syswide set failed: re-run as administrator");
}
}
if (syswide_off) {
if (dr_unregister_syswide(dr_platform, dr_root) != ERROR_SUCCESS) {
/* PR 233108: try to give more info on whether a privilege failure */
warn("syswide set failed: re-run as administrator");
}
}
# endif /* WINDOWS */
exitcode = 0;
goto cleanup;
#else /* DRCONFIG */
if (!global) {
/* i#939: attempt to work w/o any HOME/USERPROFILE by using a temp dir */
dr_get_config_dir(global, true /*use temp*/, buf, BUFFER_SIZE_ELEMENTS(buf));
info("configuration directory is \"%s\"", buf);
}
# ifdef UNIX
/* i#1676: detect whether under gdb */
char path_buf[MAXIMUM_PATH];
_snprintf(path_buf, BUFFER_SIZE_ELEMENTS(path_buf), "/proc/%d/exe", getppid());
NULL_TERMINATE_BUFFER(path_buf);
i = readlink(path_buf, buf, BUFFER_SIZE_ELEMENTS(buf));
if (i > 0) {
if (i < BUFFER_SIZE_ELEMENTS(buf))
buf[i] = '\0';
else
NULL_TERMINATE_BUFFER(buf);
}
/* On Linux, we use exec by default to create the app process. This matches
* our drrun shell script and makes scripting easier for the user.
*/
if (limit == 0 && !use_ptrace && !kill_group) {
info("will exec %s", app_name);
errcode = dr_inject_prepare_to_exec(app_name, app_argv, &inject_data);
} else
# endif /* UNIX */
{
errcode = dr_inject_process_create(app_name, app_argv, &inject_data);
info("created child with pid %d for %s", dr_inject_get_process_id(inject_data),
app_name);
}
# ifdef UNIX
if (limit != 0 && kill_group) {
/* Move the child to its own process group. */
process_id_t child_pid = dr_inject_get_process_id(inject_data);
int res = setpgid(child_pid, child_pid);
if (res < 0) {
perror("ERROR in setpgid");
goto error;
}
}
# endif
if (errcode ==
ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE
/* Check whether -32/64 is specified, but only for Linux as we do
* not support cross-arch on Windows yet (i#803).
*/
IF_UNIX(&&dr_platform != IF_X64_ELSE(DR_PLATFORM_32BIT, DR_PLATFORM_64BIT))) {
if (nocheck) {
/* Allow override for cases like i#1224 */
warn("Target process %s appears to be for the wrong architecture.", app_name);
warn("Attempting to run anyway, but it may run natively if injection fails.");
errcode = 0;
} else {
/* For Windows, better error message than the FormatMessage */
error("Target process %s is for the wrong architecture", app_name);
goto error; /* the process was still created */
}
}
if (errcode != 0 IF_UNIX(&&errcode != ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE)) {
IF_WINDOWS(int sofar =)
_snprintf(buf, BUFFER_SIZE_ELEMENTS(buf),
"Failed to create process for \"%s\": ", app_name);
# ifdef WINDOWS
if (sofar > 0) {
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
NULL, errcode, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR)buf + sofar,
BUFFER_SIZE_ELEMENTS(buf) - sofar * sizeof(char), NULL);
}
# endif /* WINDOWS */
NULL_TERMINATE_BUFFER(buf);
error("%s", buf);
goto error;
}
/* i#200/PR 459481: communicate child pid via file */
if (pidfile != NULL)
write_pid_to_file(pidfile, dr_inject_get_process_id(inject_data));
# ifdef DRRUN
/* even if !inject we create a config file, for use running standalone API
* apps. if user doesn't want a config file, should use "drinject -noinject".
*/
if (configure) {
process = dr_inject_get_image_name(inject_data);
if (!register_proc(process, dr_inject_get_process_id(inject_data), global,
dr_root, dr_mode, use_debug, dr_platform, extra_ops))
goto error;
for (j = 0; j < num_clients; j++) {
if (!register_client(process, dr_inject_get_process_id(inject_data), global,
dr_platform, client_ids[j], client_paths[j],
client_options[j]))
goto error;
}
}
# endif
# ifdef UNIX
if (use_ptrace) {
if (!dr_inject_prepare_to_ptrace(inject_data)) {
error("unable to use ptrace");
goto error;
} else {
info("using ptrace to inject");
}
}
if (kill_group) {
/* Move the child to its own process group. */
bool res = dr_inject_prepare_new_process_group(inject_data);
if (!res) {
error("error moving child to new process group");
goto error;
}
}
# endif
if (inject && !dr_inject_process_inject(inject_data, force_injection, drlib_path)) {
# ifdef DRRUN
error("unable to inject: exec of |%s| failed", drlib_path);
# else
error("unable to inject: did you forget to run drconfig first?");
# endif
goto error;
}
IF_WINDOWS(start_time = time(NULL);)
if (!dr_inject_process_run(inject_data)) {
error("unable to run");
goto error;
}
# ifdef WINDOWS
if (limit == 0 && dr_inject_using_debug_key(inject_data)) {
info("%s", "Using debugger key injection");
limit = -1; /* no wait */
}
# endif
if (limit >= 0) {
# ifdef WINDOWS
double wallclock;
# endif
uint64 limit_millis = limit * 1000;
info("waiting %sfor app to exit...", (limit <= 0) ? "forever " : "");
success = dr_inject_wait_for_child(inject_data, limit_millis);
# ifdef WINDOWS
end_time = time(NULL);
wallclock = difftime(end_time, start_time);
if (showstats || showmem)
dr_inject_print_stats(inject_data, (int)wallclock, showstats, showmem);
# endif
if (!success)
info("timeout after %d seconds\n", limit);
} else {
success = true; /* Don't kill the child if we're not waiting. */
}
exitcode = dr_inject_process_exit(inject_data, !success /*kill process*/);
if (limit < 0)
exitcode = 0; /* Return success if we didn't wait. */
if (exit0)
exitcode = 0;
goto cleanup;
error:
/* we created the process suspended so if we later had an error be sure
* to kill it instead of leaving it hanging
*/
if (inject_data != NULL)
dr_inject_process_exit(inject_data, true /*kill process*/);
# ifdef DRRUN
if (tofree != NULL)
free(tofree);
# endif
exitcode = 1;
#endif /* !DRCONFIG */
cleanup:
sc = drfront_cleanup_args(argv, argc);
if (sc != DRFRONT_SUCCESS)
fatal("failed to free memory for args: %d", sc);
/* FIXME i#840: We can't actually match exit status on Linux perfectly
* since the kernel reserves most of the bits for signal codes. At the
* very least, we should ensure if the app exits with a signal we exit
* non-zero.
*/
return exitcode;
}
| 1 | 15,258 | I don't understand why anyone would pass 0? If you don't want to attach to an existing process you just wouldn't pass -attach. I would remove that sentence and make 0 an invalid argument. | DynamoRIO-dynamorio | c |
@@ -77,8 +77,6 @@ void (*volatile s_current_function)(ThreadsExec &, const void *);
const void *volatile s_current_function_arg = nullptr;
struct Sentinel {
- Sentinel() {}
-
~Sentinel() {
if (s_thread_pool_size[0] || s_thread_pool_size[1] ||
s_thread_pool_size[2] || s_current_reduce_size || | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 2.0
// Copyright (2014) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Macros.hpp>
#if defined(KOKKOS_ENABLE_THREADS)
#include <cstdint>
#include <limits>
#include <utility>
#include <iostream>
#include <sstream>
#include <Kokkos_Core.hpp>
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_CPUDiscovery.hpp>
#include <impl/Kokkos_Profiling_Interface.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
namespace Kokkos {
namespace Impl {
namespace {
ThreadsExec s_threads_process;
ThreadsExec *s_threads_exec[ThreadsExec::MAX_THREAD_COUNT] = {nullptr};
pthread_t s_threads_pid[ThreadsExec::MAX_THREAD_COUNT] = {0};
std::pair<unsigned, unsigned> s_threads_coord[ThreadsExec::MAX_THREAD_COUNT];
int s_thread_pool_size[3] = {0, 0, 0};
unsigned s_current_reduce_size = 0;
unsigned s_current_shared_size = 0;
void (*volatile s_current_function)(ThreadsExec &, const void *);
const void *volatile s_current_function_arg = nullptr;
// Sentinel exists solely to detect, during static destruction at process
// exit, that Kokkos::Threads was left initialized: its destructor checks
// that every piece of thread-pool state has already been torn down and
// reports an error otherwise.
//
// Note: no user-declared default constructor — the implicitly-defaulted
// one is sufficient, and declaring an empty `Sentinel() {}` only adds
// noise (and makes the type non-trivially constructible).
struct Sentinel {
  ~Sentinel() {
    // Any nonzero pool size, scratch size, pending function, or live
    // thread entry means finalize() was never called.
    if (s_thread_pool_size[0] || s_thread_pool_size[1] ||
        s_thread_pool_size[2] || s_current_reduce_size ||
        s_current_shared_size || s_current_function || s_current_function_arg ||
        s_threads_exec[0]) {
      std::cerr << "ERROR : Process exiting while Kokkos::Threads is still "
                   "initialized"
                << std::endl;
    }
  }
};
// Number of fan-in partners for the thread at 'rank' in a pool of 'size'
// threads.  Ranks are reversed (rank size-1 maps to reversed rank 0, the
// fan root); the result is the count of consecutive low zero bits of the
// reversed rank, clipped so that every partner index stays inside the pool.
inline unsigned fan_size(const unsigned rank, const unsigned size) {
  const unsigned reversed = size - (rank + 1);
  unsigned partners = 0;
  unsigned bit = 1;
  while ((reversed + bit < size) && ((reversed & bit) == 0)) {
    ++partners;
    bit <<= 1;
  }
  return partners;
}
} // namespace
} // namespace Impl
} // namespace Kokkos
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
namespace Kokkos {
namespace Impl {
// Do-nothing work function: usable wherever a valid function pointer of
// the worker signature is required but no work should be performed.
void execute_function_noop(ThreadsExec &, const void *) {}
// Main loop executed by each spawned worker thread.  The thread constructs
// its own ThreadsExec (which registers it in 's_threads_exec' and sets
// m_pool_state to Active on success), then repeatedly runs the currently
// installed work function and parks itself until reactivated.  The loop
// exits when the pool state becomes Terminating.
void ThreadsExec::driver(void) {
  SharedAllocationRecord<void, void>::tracking_enable();
  ThreadsExec this_thread;
  while (ThreadsExec::Active == this_thread.m_pool_state) {
    (*s_current_function)(this_thread, s_current_function_arg);
    // Deactivate thread and wait for reactivation
    this_thread.m_pool_state = ThreadsExec::Inactive;
    wait_yield(this_thread.m_pool_state, ThreadsExec::Inactive);
  }
}
// Construct a per-thread execution resource.  A spawned worker claims an
// entry in 's_threads_exec' (the slot possibly chosen by hwloc binding)
// via compare-and-exchange and reports success or failure back to the
// spawning process through 's_threads_process.m_pool_state'.  The special
// master-process instance instead initializes itself as a serial,
// single-thread pool.
ThreadsExec::ThreadsExec()
    : m_pool_base(nullptr),
      m_scratch(nullptr),
      m_scratch_reduce_end(0),
      m_scratch_thread_end(0),
      m_numa_rank(0),
      m_numa_core_rank(0),
      m_pool_rank(0),
      m_pool_size(0),
      m_pool_fan_size(0),
      m_pool_state(ThreadsExec::Terminating) {
  if (&s_threads_process != this) {
    // A spawned thread
    ThreadsExec *const nil = nullptr;
    // Which entry in 's_threads_exec', possibly determined from hwloc binding
    // (s_current_function_arg carries the requested entry index when it is
    // a valid rank; otherwise fall back to the hwloc-bound coordinate).
    const int entry =
        ((size_t)s_current_function_arg) < size_t(s_thread_pool_size[0])
            ? ((size_t)s_current_function_arg)
            : size_t(Kokkos::hwloc::bind_this_thread(s_thread_pool_size[0],
                                                     s_threads_coord));
    // Given a good entry set this thread in the 's_threads_exec' array
    // (the CAS guarantees exactly one thread wins each slot).
    if (entry < s_thread_pool_size[0] &&
        nil == atomic_compare_exchange(s_threads_exec + entry, nil, this)) {
      const std::pair<unsigned, unsigned> coord =
          Kokkos::hwloc::get_this_thread_coordinate();
      m_numa_rank = coord.first;
      m_numa_core_rank = coord.second;
      m_pool_base = s_threads_exec;
      // Pool ranks are stored in reverse order of the entry index.
      m_pool_rank = s_thread_pool_size[0] - (entry + 1);
      m_pool_rank_rev = s_thread_pool_size[0] - (pool_rank() + 1);
      m_pool_size = s_thread_pool_size[0];
      m_pool_fan_size = fan_size(m_pool_rank, m_pool_size);
      m_pool_state = ThreadsExec::Active;
      s_threads_pid[m_pool_rank] = pthread_self();
      // Inform spawning process that the threads_exec entry has been set.
      s_threads_process.m_pool_state = ThreadsExec::Active;
    } else {
      // Inform spawning process that the threads_exec entry could not be set.
      s_threads_process.m_pool_state = ThreadsExec::Terminating;
    }
  } else {
    // Enables 'parallel_for' to execute on unitialized Threads device
    m_pool_rank = 0;
    m_pool_size = 1;
    m_pool_state = ThreadsExec::Inactive;
    s_threads_pid[m_pool_rank] = pthread_self();
  }
}
// Tear down a per-thread execution resource: release the tracked scratch
// allocation, zero all pool bookkeeping, and — for a spawned worker —
// clear this thread's slot in 's_threads_exec' while signaling the master
// process that the entry is gone.
ThreadsExec::~ThreadsExec() {
  // Slot index in 's_threads_exec' (ranks are stored in reverse order).
  // Computed before the members are reset below.
  const unsigned entry = m_pool_size - (m_pool_rank + 1);
  typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::HostSpace, void> Record;
  if (m_scratch) {
    // Drop this thread's reference on the tracked scratch allocation.
    Record *const r = Record::get_record(m_scratch);
    m_scratch = nullptr;
    Record::decrement(r);
  }
  m_pool_base = nullptr;
  m_scratch_reduce_end = 0;
  m_scratch_thread_end = 0;
  m_numa_rank = 0;
  m_numa_core_rank = 0;
  m_pool_rank = 0;
  m_pool_size = 0;
  m_pool_fan_size = 0;
  m_pool_state = ThreadsExec::Terminating;
  if (&s_threads_process != this && entry < MAX_THREAD_COUNT) {
    // Release this worker's slot (CAS so only our own pointer is cleared)
    // and notify the master process of the teardown.
    ThreadsExec *const nil = nullptr;
    atomic_compare_exchange(s_threads_exec + entry, this, nil);
    s_threads_process.m_pool_state = ThreadsExec::Terminating;
  }
}
// Total number of threads in the pool (zero when uninitialized).
int ThreadsExec::get_thread_count() {
  return s_thread_pool_size[0];
}
// Look up the ThreadsExec entry for 'init_thread_rank'.  Throws a
// descriptive runtime exception when the rank is out of range, the slot
// is empty, or the stored entry carries a different rank.
ThreadsExec *ThreadsExec::get_thread(const int init_thread_rank) {
  ThreadsExec *entry = nullptr;
  if (init_thread_rank < s_thread_pool_size[0]) {
    // Entries are stored in reverse rank order.
    entry = s_threads_exec[s_thread_pool_size[0] - (init_thread_rank + 1)];
  }
  if (entry == nullptr || entry->m_pool_rank != init_thread_rank) {
    std::ostringstream msg;
    msg << "Kokkos::Impl::ThreadsExec::get_thread ERROR : "
        << "thread " << init_thread_rank << " of " << s_thread_pool_size[0];
    if (entry == nullptr) {
      msg << " does not exist";
    } else {
      msg << " has wrong thread_rank " << entry->m_pool_rank;
    }
    Kokkos::Impl::throw_runtime_exception(msg.str());
  }
  return entry;
}
//----------------------------------------------------------------------------
// Work function installed by ThreadsExec::sleep().  Each worker blocks on
// the global lock (held by the master until wake()), then spin-waits for
// its fan-in partners to deactivate before marking itself inactive.
void ThreadsExec::execute_sleep(ThreadsExec &exec, const void *) {
  // Blocks here while the master holds the global lock.
  ThreadsExec::global_lock();
  ThreadsExec::global_unlock();
  const int n = exec.m_pool_fan_size;
  const int rank_rev = exec.m_pool_size - (exec.m_pool_rank + 1);
  // Fan-in: wait for each partner thread to leave the Active state.
  for (int i = 0; i < n; ++i) {
    Impl::spinwait_while_equal<int>(
        exec.m_pool_base[rank_rev + (1 << i)]->m_pool_state,
        ThreadsExec::Active);
  }
  exec.m_pool_state = ThreadsExec::Inactive;
}
} // namespace Impl
} // namespace Kokkos
//----------------------------------------------------------------------------
namespace Kokkos {
namespace Impl {
// Guard used by master-only entry points: throws when called from a
// worker thread, and — when 'initialized' is requested — also throws when
// the thread pool has not been set up yet.  'name' prefixes the message.
void ThreadsExec::verify_is_process(const std::string &name,
                                    const bool initialized) {
  if (!is_process()) {
    const std::string msg =
        name +
        " FAILED : Called by a worker thread, can only be "
        "called by the master process.";
    Kokkos::Impl::throw_runtime_exception(msg);
  }
  if (initialized && 0 == s_thread_pool_size[0]) {
    const std::string msg = name + " FAILED : Threads not initialized.";
    Kokkos::Impl::throw_runtime_exception(msg);
  }
}
// Nonzero while a parallel work function is executing on the pool.
int ThreadsExec::in_parallel() {
  // A thread function is in execution and
  // the function argument is not the special threads process argument and
  // the master process is a worker or is not the master process.
  return s_current_function && (&s_threads_process != s_current_function_arg) &&
         (s_threads_process.m_pool_base || !is_process());
}
// Wait for root thread to become inactive, then clear the launch state so
// a subsequent start() is accepted.
void ThreadsExec::fence() {
  if (s_thread_pool_size[0]) {
    // Wait for the root thread to complete:
    Impl::spinwait_while_equal<int>(s_threads_exec[0]->m_pool_state,
                                    ThreadsExec::Active);
  }
  s_current_function = nullptr;
  s_current_function_arg = nullptr;
  // Make sure function and arguments are cleared before
  // potentially re-activating threads with a subsequent launch.
  memory_fence();
}
/** \brief Begin execution of the asynchronous functor.
 *
 *  Installs (func, arg) as the current work, publishes it with a memory
 *  fence, activates every worker, and — when the master process is part of
 *  the pool — runs the root's share inline.  Throws if a previous launch
 *  has not been fenced yet.
 */
void ThreadsExec::start(void (*func)(ThreadsExec &, const void *),
                        const void *arg) {
  verify_is_process("ThreadsExec::start", true);
  // fence() clears these; a nonzero value means a launch is still pending.
  if (s_current_function || s_current_function_arg) {
    Kokkos::Impl::throw_runtime_exception(
        std::string("ThreadsExec::start() FAILED : already executing"));
  }
  s_current_function = func;
  s_current_function_arg = arg;
  // Make sure function and arguments are written before activating threads.
  memory_fence();
  // Activate threads:
  for (int i = s_thread_pool_size[0]; 0 < i--;) {
    s_threads_exec[i]->m_pool_state = ThreadsExec::Active;
  }
  if (s_threads_process.m_pool_size) {
    // Master process is the root thread, run it:
    (*func)(s_threads_process, arg);
    s_threads_process.m_pool_state = ThreadsExec::Inactive;
  }
}
//----------------------------------------------------------------------------
// Put the pool to sleep: takes the global lock, installs 'execute_sleep'
// as the current function, and activates all workers, which then block on
// that lock inside execute_sleep() until wake() releases it.
// Returns false when the pool is already sleeping.
bool ThreadsExec::sleep() {
  verify_is_process("ThreadsExec::sleep", true);
  if (&execute_sleep == s_current_function) return false;
  fence();
  ThreadsExec::global_lock();
  s_current_function = &execute_sleep;
  // Activate threads:
  for (unsigned i = s_thread_pool_size[0]; 0 < i;) {
    s_threads_exec[--i]->m_pool_state = ThreadsExec::Active;
  }
  return true;
}
// Wake a sleeping pool: releases the global lock so workers blocked in
// execute_sleep() proceed, runs the master's own execute_sleep() share if
// the master is part of the pool, then fences.  Returns false when the
// pool was not sleeping.
bool ThreadsExec::wake() {
  verify_is_process("ThreadsExec::wake", true);
  if (&execute_sleep != s_current_function) return false;
  ThreadsExec::global_unlock();
  if (s_threads_process.m_pool_base) {
    execute_sleep(s_threads_process, nullptr);
    s_threads_process.m_pool_state = ThreadsExec::Inactive;
  }
  fence();
  return true;
}
//----------------------------------------------------------------------------
// Run 'func' on every pool thread one at a time (serialized): each worker
// is activated and then waited upon before the next is started, with the
// master process (if in the pool) running last.  The special argument
// '&s_threads_process' distinguishes this serial launch from a normal one.
void ThreadsExec::execute_serial(void (*func)(ThreadsExec &, const void *)) {
  s_current_function = func;
  s_current_function_arg = &s_threads_process;
  // Make sure function and arguments are written before activating threads.
  memory_fence();
  // Skip entry 0 when the master process itself occupies the pool base.
  const unsigned begin = s_threads_process.m_pool_base ? 1 : 0;
  for (unsigned i = s_thread_pool_size[0]; begin < i;) {
    ThreadsExec &th = *s_threads_exec[--i];
    th.m_pool_state = ThreadsExec::Active;
    // Serialize: wait for this worker to finish before starting the next.
    wait_yield(th.m_pool_state, ThreadsExec::Active);
  }
  if (s_threads_process.m_pool_base) {
    s_threads_process.m_pool_state = ThreadsExec::Active;
    (*func)(s_threads_process, nullptr);
    s_threads_process.m_pool_state = ThreadsExec::Inactive;
  }
  s_current_function_arg = nullptr;
  s_current_function = nullptr;
  // Make sure function and arguments are cleared before proceeding.
  memory_fence();
}
//----------------------------------------------------------------------------
// Reduction scratch memory of the master process thread.
void *ThreadsExec::root_reduce_scratch() {
  return s_threads_process.reduce_memory();
}
// Work function run (serially, via execute_serial) on each thread to
// release its old tracked scratch allocation and allocate a new one sized
// by the master process' scratch extents.  The new memory is zero-touched
// on the owning thread (first-touch placement on NUMA systems —
// presumably the intent; confirm against allocation policy).
void ThreadsExec::execute_resize_scratch(ThreadsExec &exec, const void *) {
  typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::HostSpace, void> Record;
  if (exec.m_scratch) {
    // Drop the reference on the old scratch allocation.
    Record *const r = Record::get_record(exec.m_scratch);
    exec.m_scratch = nullptr;
    Record::decrement(r);
  }
  // Adopt the extents the master set in resize_scratch().
  exec.m_scratch_reduce_end = s_threads_process.m_scratch_reduce_end;
  exec.m_scratch_thread_end = s_threads_process.m_scratch_thread_end;
  if (s_threads_process.m_scratch_thread_end) {
    // Allocate tracked memory:
    {
      Record *const r =
          Record::allocate(Kokkos::HostSpace(), "thread_scratch",
                           s_threads_process.m_scratch_thread_end);
      Record::increment(r);
      exec.m_scratch = r->data();
    }
    unsigned *ptr = reinterpret_cast<unsigned *>(exec.m_scratch);
    unsigned *const end =
        ptr + s_threads_process.m_scratch_thread_end / sizeof(unsigned);
    // touch on this thread
    while (ptr < end) *ptr++ = 0;
  }
}
// Ensure each thread's scratch holds at least 'reduce_size' bytes of
// reduction space followed by 'thread_size' bytes of thread space (both
// rounded up to the memory alignment).  Reallocation happens only when a
// size grows, or when both requests are zero and scratch exists (full
// deallocation).  Returns the master process' scratch pointer.
void *ThreadsExec::resize_scratch(size_t reduce_size, size_t thread_size) {
  enum { ALIGN_MASK = Kokkos::Impl::MEMORY_ALIGNMENT - 1 };
  fence();
  const size_t old_reduce_size = s_threads_process.m_scratch_reduce_end;
  const size_t old_thread_size = s_threads_process.m_scratch_thread_end -
                                 s_threads_process.m_scratch_reduce_end;
  // Round both requests up to the alignment boundary.
  reduce_size = (reduce_size + ALIGN_MASK) & ~ALIGN_MASK;
  thread_size = (thread_size + ALIGN_MASK) & ~ALIGN_MASK;
  // Increase size or deallocate completely.
  if ((old_reduce_size < reduce_size) || (old_thread_size < thread_size) ||
      ((reduce_size == 0 && thread_size == 0) &&
       (old_reduce_size != 0 || old_thread_size != 0))) {
    verify_is_process("ThreadsExec::resize_scratch", true);
    // Publish the new extents, then have every thread (re)allocate its own
    // scratch via execute_resize_scratch.
    s_threads_process.m_scratch_reduce_end = reduce_size;
    s_threads_process.m_scratch_thread_end = reduce_size + thread_size;
    execute_serial(&execute_resize_scratch);
    s_threads_process.m_scratch = s_threads_exec[0]->m_scratch;
  }
  return s_threads_process.m_scratch;
}
//----------------------------------------------------------------------------
void ThreadsExec::print_configuration(std::ostream &s, const bool detail) {
verify_is_process("ThreadsExec::print_configuration", false);
fence();
const unsigned numa_count = Kokkos::hwloc::get_available_numa_count();
const unsigned cores_per_numa = Kokkos::hwloc::get_available_cores_per_numa();
const unsigned threads_per_core =
Kokkos::hwloc::get_available_threads_per_core();
// Forestall compiler warnings for unused variables.
(void)numa_count;
(void)cores_per_numa;
(void)threads_per_core;
s << "Kokkos::Threads";
#if defined(KOKKOS_ENABLE_THREADS)
s << " KOKKOS_ENABLE_THREADS";
#endif
#if defined(KOKKOS_ENABLE_HWLOC)
s << " hwloc[" << numa_count << "x" << cores_per_numa << "x"
<< threads_per_core << "]";
#endif
if (s_thread_pool_size[0]) {
s << " threads[" << s_thread_pool_size[0] << "]"
<< " threads_per_numa[" << s_thread_pool_size[1] << "]"
<< " threads_per_core[" << s_thread_pool_size[2] << "]";
if (nullptr == s_threads_process.m_pool_base) {
s << " Asynchronous";
}
s << " ReduceScratch[" << s_current_reduce_size << "]"
<< " SharedScratch[" << s_current_shared_size << "]";
s << std::endl;
if (detail) {
for (int i = 0; i < s_thread_pool_size[0]; ++i) {
ThreadsExec *const th = s_threads_exec[i];
if (th) {
const int rank_rev = th->m_pool_size - (th->m_pool_rank + 1);
s << " Thread[ " << th->m_pool_rank << " : " << th->m_numa_rank << "."
<< th->m_numa_core_rank << " ]";
s << " Fan{";
for (int j = 0; j < th->m_pool_fan_size; ++j) {
ThreadsExec *const thfan = th->m_pool_base[rank_rev + (1 << j)];
s << " [ " << thfan->m_pool_rank << " : " << thfan->m_numa_rank
<< "." << thfan->m_numa_core_rank << " ]";
}
s << " }";
if (th == &s_threads_process) {
s << " is_process";
}
}
s << std::endl;
}
}
} else {
s << " not initialized" << std::endl;
}
}
//----------------------------------------------------------------------------
int ThreadsExec::is_initialized() { return nullptr != s_threads_exec[0]; }
void ThreadsExec::initialize(unsigned thread_count, unsigned use_numa_count,
unsigned use_cores_per_numa,
bool allow_asynchronous_threadpool) {
static const Sentinel sentinel;
const bool is_initialized = 0 != s_thread_pool_size[0];
unsigned thread_spawn_failed = 0;
for (int i = 0; i < ThreadsExec::MAX_THREAD_COUNT; i++)
s_threads_exec[i] = nullptr;
if (!is_initialized) {
// If thread_count, use_numa_count, or use_cores_per_numa are zero
// then they will be given default values based upon hwloc detection
// and allowed asynchronous execution.
const bool hwloc_avail = Kokkos::hwloc::available();
const bool hwloc_can_bind =
hwloc_avail && Kokkos::hwloc::can_bind_threads();
if (thread_count == 0) {
thread_count = hwloc_avail
? Kokkos::hwloc::get_available_numa_count() *
Kokkos::hwloc::get_available_cores_per_numa() *
Kokkos::hwloc::get_available_threads_per_core()
: 1;
}
const unsigned thread_spawn_begin = hwloc::thread_mapping(
"Kokkos::Threads::initialize", allow_asynchronous_threadpool,
thread_count, use_numa_count, use_cores_per_numa, s_threads_coord);
const std::pair<unsigned, unsigned> proc_coord = s_threads_coord[0];
if (thread_spawn_begin) {
// Synchronous with s_threads_coord[0] as the process core
// Claim entry #0 for binding the process core.
s_threads_coord[0] = std::pair<unsigned, unsigned>(~0u, ~0u);
}
s_thread_pool_size[0] = thread_count;
s_thread_pool_size[1] = s_thread_pool_size[0] / use_numa_count;
s_thread_pool_size[2] = s_thread_pool_size[1] / use_cores_per_numa;
s_current_function =
&execute_function_noop; // Initialization work function
for (unsigned ith = thread_spawn_begin; ith < thread_count; ++ith) {
s_threads_process.m_pool_state = ThreadsExec::Inactive;
// If hwloc available then spawned thread will
// choose its own entry in 's_threads_coord'
// otherwise specify the entry.
s_current_function_arg =
(void *)static_cast<uintptr_t>(hwloc_can_bind ? ~0u : ith);
// Make sure all outstanding memory writes are complete
// before spawning the new thread.
memory_fence();
// Spawn thread executing the 'driver()' function.
// Wait until spawned thread has attempted to initialize.
// If spawning and initialization is successfull then
// an entry in 's_threads_exec' will be assigned.
if (ThreadsExec::spawn()) {
wait_yield(s_threads_process.m_pool_state, ThreadsExec::Inactive);
}
if (s_threads_process.m_pool_state == ThreadsExec::Terminating) break;
}
// Wait for all spawned threads to deactivate before zeroing the function.
for (unsigned ith = thread_spawn_begin; ith < thread_count; ++ith) {
// Try to protect against cache coherency failure by casting to volatile.
ThreadsExec *const th = ((ThreadsExec * volatile *)s_threads_exec)[ith];
if (th) {
wait_yield(th->m_pool_state, ThreadsExec::Active);
} else {
++thread_spawn_failed;
}
}
s_current_function = nullptr;
s_current_function_arg = nullptr;
s_threads_process.m_pool_state = ThreadsExec::Inactive;
memory_fence();
if (!thread_spawn_failed) {
// Bind process to the core on which it was located before spawning
// occured
if (hwloc_can_bind) {
Kokkos::hwloc::bind_this_thread(proc_coord);
}
if (thread_spawn_begin) { // Include process in pool.
const std::pair<unsigned, unsigned> coord =
Kokkos::hwloc::get_this_thread_coordinate();
s_threads_exec[0] = &s_threads_process;
s_threads_process.m_numa_rank = coord.first;
s_threads_process.m_numa_core_rank = coord.second;
s_threads_process.m_pool_base = s_threads_exec;
s_threads_process.m_pool_rank =
thread_count - 1; // Reversed for scan-compatible reductions
s_threads_process.m_pool_size = thread_count;
s_threads_process.m_pool_fan_size = fan_size(
s_threads_process.m_pool_rank, s_threads_process.m_pool_size);
s_threads_pid[s_threads_process.m_pool_rank] = pthread_self();
} else {
s_threads_process.m_pool_base = nullptr;
s_threads_process.m_pool_rank = 0;
s_threads_process.m_pool_size = 0;
s_threads_process.m_pool_fan_size = 0;
}
// Initial allocations:
ThreadsExec::resize_scratch(1024, 1024);
} else {
s_thread_pool_size[0] = 0;
s_thread_pool_size[1] = 0;
s_thread_pool_size[2] = 0;
}
}
if (is_initialized || thread_spawn_failed) {
std::ostringstream msg;
msg << "Kokkos::Threads::initialize ERROR";
if (is_initialized) {
msg << " : already initialized";
}
if (thread_spawn_failed) {
msg << " : failed to spawn " << thread_spawn_failed << " threads";
}
Kokkos::Impl::throw_runtime_exception(msg.str());
}
// Check for over-subscription
if (Kokkos::show_warnings() &&
(Impl::mpi_ranks_per_node() * long(thread_count) >
Impl::processors_per_node())) {
std::cerr << "Kokkos::Threads::initialize WARNING: You are likely "
"oversubscribing your CPU cores."
<< std::endl;
std::cerr << " Detected: "
<< Impl::processors_per_node() << " cores per node." << std::endl;
std::cerr << " Detected: "
<< Impl::mpi_ranks_per_node() << " MPI_ranks per node."
<< std::endl;
std::cerr << " Requested: "
<< thread_count << " threads per process." << std::endl;
}
// Init the array for used for arbitrarily sized atomics
Impl::init_lock_array_host_space();
Impl::SharedAllocationRecord<void, void>::tracking_enable();
#if defined(KOKKOS_ENABLE_DEPRECATED_CODE) && defined(KOKKOS_ENABLE_PROFILING)
Kokkos::Profiling::initialize();
#endif
}
//----------------------------------------------------------------------------
void ThreadsExec::finalize() {
verify_is_process("ThreadsExec::finalize", false);
fence();
resize_scratch(0, 0);
const unsigned begin = s_threads_process.m_pool_base ? 1 : 0;
for (unsigned i = s_thread_pool_size[0]; begin < i--;) {
if (s_threads_exec[i]) {
s_threads_exec[i]->m_pool_state = ThreadsExec::Terminating;
wait_yield(s_threads_process.m_pool_state, ThreadsExec::Inactive);
s_threads_process.m_pool_state = ThreadsExec::Inactive;
}
s_threads_pid[i] = 0;
}
if (s_threads_process.m_pool_base) {
(&s_threads_process)->~ThreadsExec();
s_threads_exec[0] = nullptr;
}
if (Kokkos::hwloc::can_bind_threads()) {
Kokkos::hwloc::unbind_this_thread();
}
s_thread_pool_size[0] = 0;
s_thread_pool_size[1] = 0;
s_thread_pool_size[2] = 0;
// Reset master thread to run solo.
s_threads_process.m_numa_rank = 0;
s_threads_process.m_numa_core_rank = 0;
s_threads_process.m_pool_base = nullptr;
s_threads_process.m_pool_rank = 0;
s_threads_process.m_pool_size = 1;
s_threads_process.m_pool_fan_size = 0;
s_threads_process.m_pool_state = ThreadsExec::Inactive;
#if defined(KOKKOS_ENABLE_PROFILING)
Kokkos::Profiling::finalize();
#endif
}
//----------------------------------------------------------------------------
} /* namespace Impl */
} /* namespace Kokkos */
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
namespace Kokkos {
int Threads::concurrency() {
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
return thread_pool_size(0);
#else
return impl_thread_pool_size(0);
#endif
}
#ifndef KOKKOS_ENABLE_DEPRECATED_CODE
void Threads::fence() const { Impl::ThreadsExec::fence(); }
#endif
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
Threads &Threads::instance(int)
#else
Threads &Threads::impl_instance(int)
#endif
{
static Threads t;
return t;
}
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
int Threads::thread_pool_size(int depth)
#else
int Threads::impl_thread_pool_size(int depth)
#endif
{
return Impl::s_thread_pool_size[depth];
}
#if defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST)
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
int Threads::thread_pool_rank()
#else
int Threads::impl_thread_pool_rank()
#endif
{
const pthread_t pid = pthread_self();
int i = 0;
while ((i < Impl::s_thread_pool_size[0]) && (pid != Impl::s_threads_pid[i])) {
++i;
}
return i;
}
#endif
const char *Threads::name() { return "Threads"; }
} /* namespace Kokkos */
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
#else
void KOKKOS_CORE_SRC_THREADS_EXEC_PREVENT_LINK_ERROR() {}
#endif /* #if defined( KOKKOS_ENABLE_THREADS ) */
| 1 | 21,532 | This change is responsible for #2775 | kokkos-kokkos | cpp |
@@ -88,7 +88,11 @@ namespace Datadog.Trace.Tests.RuntimeMetrics
writer.PushEvents();
statsd.Verify(
- s => s.Increment(MetricsNames.ExceptionsCount, It.IsAny<int>(), It.IsAny<double>(), It.IsAny<string[]>()),
+ s => s.Increment(MetricsNames.ExceptionsCount, It.IsAny<int>(), It.IsAny<double>(), new[] { "exception_type:CustomException1" }),
+ Times.Never);
+
+ statsd.Verify(
+ s => s.Increment(MetricsNames.ExceptionsCount, It.IsAny<int>(), It.IsAny<double>(), new[] { "exception_type:CustomException2" }),
Times.Never);
}
} | 1 | using System;
using System.Linq;
using System.Threading;
using Datadog.Trace.RuntimeMetrics;
using Datadog.Trace.Vendors.StatsdClient;
using Moq;
using Xunit;
namespace Datadog.Trace.Tests.RuntimeMetrics
{
[CollectionDefinition(nameof(RuntimeMetricsWriterTests), DisableParallelization = true)]
[Collection(nameof(RuntimeMetricsWriterTests))]
public class RuntimeMetricsWriterTests
{
[Fact]
public void PushEvents()
{
var listener = new Mock<IRuntimeMetricsListener>();
var mutex = new ManualResetEventSlim();
listener.Setup(l => l.Refresh())
.Callback(() => mutex.Set());
using (new RuntimeMetricsWriter(Mock.Of<IDogStatsd>(), TimeSpan.FromMilliseconds(10), (_, d) => listener.Object))
{
Assert.True(mutex.Wait(10000), "Method Refresh() wasn't called on the listener");
}
}
[Fact]
public void ShouldSwallowFactoryExceptions()
{
Func<IDogStatsd, TimeSpan, IRuntimeMetricsListener> factory = (_, d) => throw new InvalidOperationException("This exception should be caught");
var writer = new RuntimeMetricsWriter(Mock.Of<IDogStatsd>(), TimeSpan.FromMilliseconds(10), factory);
writer.Dispose();
}
[Fact]
public void ShouldCaptureFirstChanceExceptions()
{
var statsd = new Mock<IDogStatsd>();
using (var writer = new RuntimeMetricsWriter(statsd.Object, TimeSpan.FromMilliseconds(Timeout.Infinite), (_, d) => Mock.Of<IRuntimeMetricsListener>()))
{
for (int i = 0; i < 10; i++)
{
try
{
throw new CustomException1();
}
catch
{
// ignored
}
if (i % 2 == 0)
{
try
{
throw new CustomException2();
}
catch
{
// ignored
}
}
}
statsd.Verify(
s => s.Increment(MetricsNames.ExceptionsCount, It.IsAny<int>(), It.IsAny<double>(), It.IsAny<string[]>()),
Times.Never);
writer.PushEvents();
statsd.Verify(
s => s.Increment(MetricsNames.ExceptionsCount, 10, It.IsAny<double>(), new[] { "exception_type:CustomException1" }),
Times.Once);
statsd.Verify(
s => s.Increment(MetricsNames.ExceptionsCount, 5, It.IsAny<double>(), new[] { "exception_type:CustomException2" }),
Times.Once);
statsd.Invocations.Clear();
// Make sure stats are reset when pushed
writer.PushEvents();
statsd.Verify(
s => s.Increment(MetricsNames.ExceptionsCount, It.IsAny<int>(), It.IsAny<double>(), It.IsAny<string[]>()),
Times.Never);
}
}
[Fact]
public void CleanupResources()
{
var statsd = new Mock<IDogStatsd>();
var runtimeListener = new Mock<IRuntimeMetricsListener>();
var writer = new RuntimeMetricsWriter(statsd.Object, TimeSpan.FromMilliseconds(Timeout.Infinite), (_, d) => runtimeListener.Object);
writer.Dispose();
runtimeListener.Verify(l => l.Dispose(), Times.Once);
// Make sure that the writer unsubscribed from the global exception handler
try
{
throw new CustomException1();
}
catch
{
// ignored
}
writer.ExceptionCounts.TryGetValue(nameof(CustomException1), out var count);
Assert.Equal(0, count);
}
private class CustomException1 : Exception
{
}
private class CustomException2 : Exception
{
}
}
}
| 1 | 19,343 | Should we add this to the other "never" verification in lines 71-73 above, too? | DataDog-dd-trace-dotnet | .cs |
@@ -175,12 +175,12 @@ class CaiApiClientImpl(gcp.ApiClientImpl):
dict: Generator of datasets.
"""
- resources = list(self.dao.iter_cai_assets(
+ resources = self.dao.iter_cai_assets(
ContentTypes.resource,
'google.cloud.bigquery.Dataset',
'//cloudresourcemanager.googleapis.com/projects/{}'.format(
project_number),
- self.session))
+ self.session)
for dataset in resources:
yield dataset | 1 | # Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Asset and GCP API hybrid client fassade."""
# pylint: disable=too-many-lines
import threading
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.services import db
from google.cloud.forseti.services.inventory.base import gcp
from google.cloud.forseti.services.inventory.base import iam_helpers
from google.cloud.forseti.services.inventory.storage import CaiDataAccess
from google.cloud.forseti.services.inventory.storage import ContentTypes
LOCAL_THREAD = threading.local()
LOGGER = logger.get_logger(__name__)
def _fixup_resource_keys(resource, key_map, only_fixup_lists=False):
"""Correct different attribute names between CAI and json representation.
Args:
resource (dict): The resource dictionary to scan for keys in the
key_map.
key_map (dict): A map of bad_key:good_key pairs, any instance of bad_key
in the resource dict is replaced with an instance of good_key.
only_fixup_lists (bool): If true, only keys that have values which are
lists will be fixed. This allows the case where there is the same
key used for both a scalar entry and a list entry, and only the
list entry should change to the different key.
Returns:
dict: A resource dict with all bad keys replaced with good keys.
"""
fixed_resource = {}
for key, value in resource.items():
if isinstance(value, dict):
# Recursively fix keys in sub dictionaries.
value = _fixup_resource_keys(value, key_map)
elif isinstance(value, list):
# Recursively fix keys in sub dictionaries in lists.
new_value = []
for item in value:
if isinstance(item, dict):
item = _fixup_resource_keys(item, key_map)
new_value.append(item)
value = new_value
# Only replace the old key with the new key if the value of the field
# is a list. This behavior can be overridden by setting the optional
# argument only_fixup_lists to False.
should_update_key = bool(
not only_fixup_lists or isinstance(value, list))
if key in key_map and should_update_key:
fixed_resource[key_map[key]] = value
else:
fixed_resource[key] = value
return fixed_resource
# pylint: disable=too-many-public-methods
class CaiApiClientImpl(gcp.ApiClientImpl):
"""The gcp api client Implementation"""
def __init__(self, config, engine, parallel, session):
"""Initialize.
Args:
config (dict): GCP API client configuration.
engine (object): Database engine to operate on.
parallel (bool): If true, use the parallel crawler implementation.
session (object): Database session.
"""
super(CaiApiClientImpl, self).__init__(config)
self.dao = CaiDataAccess()
self.engine = engine
self.parallel = parallel
self.cai_session = session
self._local = LOCAL_THREAD
@property
def session(self):
"""Return a thread local CAI read only session object.
Returns:
object: A thread local Session.
"""
if not self.parallel:
# SQLite doesn't support per thread sessions cleanly, so use global
# session.
return self.cai_session
if hasattr(self._local, 'cai_session'):
return self._local.cai_session
self._local.cai_session = db.create_readonly_session(engine=self.engine)
return self._local.cai_session
def fetch_bigquery_iam_policy(self, project_id, project_number, dataset_id):
"""Gets IAM policy of a bigquery dataset from Cloud Asset data.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
dataset_id (str): id of the dataset to query.
Returns:
dict: Dataset IAM Policy.
"""
bigquery_name_fmt = '//bigquery.googleapis.com/projects/{}/datasets/{}'
# Try fetching with project id, if that returns nothing, fall back to
# project number.
resource = self.dao.fetch_cai_asset(
ContentTypes.iam_policy,
'google.cloud.bigquery.Dataset',
bigquery_name_fmt.format(project_id, dataset_id),
self.session)
if not resource:
resource = self.dao.fetch_cai_asset(
ContentTypes.iam_policy,
'google.cloud.bigquery.Dataset',
bigquery_name_fmt.format(project_number, dataset_id),
self.session)
if resource:
return resource
return {}
def fetch_bigquery_dataset_policy(self, project_id, project_number,
dataset_id):
"""Dataset policy Iterator for a dataset from Cloud Asset data.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
dataset_id (str): id of the dataset to query.
Returns:
dict: Dataset Policy.
"""
resource = self.fetch_bigquery_iam_policy(
project_id, project_number, dataset_id)
if resource:
return iam_helpers.convert_iam_to_bigquery_policy(resource)
# Fall back to live API if the data isn't in the CAI cache.
return super(CaiApiClientImpl, self).fetch_bigquery_dataset_policy(
project_id, project_number, dataset_id)
def iter_bigquery_datasets(self, project_number):
"""Iterate Datasets from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of datasets.
"""
resources = list(self.dao.iter_cai_assets(
ContentTypes.resource,
'google.cloud.bigquery.Dataset',
'//cloudresourcemanager.googleapis.com/projects/{}'.format(
project_number),
self.session))
for dataset in resources:
yield dataset
def fetch_billing_account_iam_policy(self, account_id):
"""Gets IAM policy of a Billing Account from Cloud Asset data.
Args:
account_id (str): id of the billing account to get policy.
Returns:
dict: Billing Account IAM policy.
"""
resource = self.dao.fetch_cai_asset(
ContentTypes.iam_policy,
'google.cloud.billing.BillingAccount',
'//cloudbilling.googleapis.com/{}'.format(account_id),
self.session)
if resource:
return resource
# Fall back to live API if the data isn't in the CAI cache.
return super(CaiApiClientImpl, self).fetch_billing_account_iam_policy(
account_id)
def iter_billing_accounts(self):
"""Iterate Billing Accounts in an organization from Cloud Asset data.
Yields:
dict: Generator of billing accounts.
"""
resources = self.dao.iter_cai_assets(
ContentTypes.resource,
'google.cloud.billing.BillingAccount',
'', # Billing accounts have no parent resource.
self.session)
for account in resources:
yield account
def iter_cloudsql_instances(self, project_number):
"""Iterate Cloud sql instances from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of cloudsql instance.
"""
resources = self.dao.iter_cai_assets(
ContentTypes.resource,
'google.cloud.sql.Instance',
'//cloudresourcemanager.googleapis.com/projects/{}'.format(
project_number),
self.session)
for instance in resources:
yield instance
def _iter_compute_resources(self, asset_type, project_number):
"""Iterate Compute resources from Cloud Asset data.
Args:
asset_type (str): The Compute asset type to iterate.
project_number (str): number of the project to query.
Returns:
generator: A generator of resources from Cloud Asset data.
"""
return self.dao.iter_cai_assets(
ContentTypes.resource,
'google.compute.{}'.format(asset_type),
'//cloudresourcemanager.googleapis.com/projects/{}'.format(
project_number),
self.session)
def iter_compute_autoscalers(self, project_number):
"""Iterate Autoscalers from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of autoscaler resources.
"""
resources = self._iter_compute_resources('Autoscaler', project_number)
for autoscaler in resources:
yield autoscaler
def iter_compute_backendbuckets(self, project_number):
"""Iterate Backend buckets from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of backend bucket resources.
"""
resources = self._iter_compute_resources('BackendBucket',
project_number)
for backendbucket in resources:
yield backendbucket
def iter_compute_backendservices(self, project_number):
"""Iterate Backend services from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of backend service.
"""
cai_to_gcp_key_map = {
'backend': 'backends',
'healthCheck': 'healthChecks',
}
resources = self._iter_compute_resources('BackendService',
project_number)
for backendservice in resources:
yield _fixup_resource_keys(backendservice, cai_to_gcp_key_map)
def iter_compute_disks(self, project_number):
"""Iterate Compute Engine disks from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Disk.
"""
cai_to_gcp_key_map = {
'license': 'licenses',
'guestOsFeature': 'guestOsFeatures',
'user': 'users',
'replicaZone': 'replicaZones',
'licenseCode': 'licenseCodes',
}
resources = self._iter_compute_resources('Disk', project_number)
for disk in resources:
yield _fixup_resource_keys(disk, cai_to_gcp_key_map)
def iter_compute_firewalls(self, project_number):
"""Iterate Compute Engine Firewalls from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Engine Firewall.
"""
cai_to_gcp_key_map = {
'ipProtocol': 'IPProtocol',
'port': 'ports',
'sourceRange': 'sourceRanges',
'sourceServiceAccount': 'sourceServiceAccounts',
'sourceTag': 'sourceTags',
'targetRange': 'targetRanges',
'targetServiceAccount': 'targetServiceAccounts',
'targetTag': 'targetTags',
}
resources = self._iter_compute_resources('Firewall', project_number)
for rule in resources:
yield _fixup_resource_keys(rule, cai_to_gcp_key_map)
def iter_compute_forwardingrules(self, project_number):
"""Iterate Forwarding Rules from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of forwarding rule resources.
"""
cai_to_gcp_key_map = {
'ipAddress': 'IPAddress',
'ipProtocol': 'IPProtocol',
}
resources = self._iter_compute_resources('ForwardingRule',
project_number)
for forwarding_rule in resources:
yield _fixup_resource_keys(forwarding_rule, cai_to_gcp_key_map)
def iter_compute_healthchecks(self, project_number):
"""Iterate Health checks from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of health check resources.
"""
resources = self._iter_compute_resources('HealthCheck', project_number)
for healthcheck in resources:
yield healthcheck
def iter_compute_httphealthchecks(self, project_number):
"""Iterate HTTP Health checks from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of HTTP health check resources.
"""
resources = self._iter_compute_resources('HttpHealthCheck',
project_number)
for httphealthcheck in resources:
yield httphealthcheck
def iter_compute_httpshealthchecks(self, project_number):
"""Iterate HTTPS Health checks from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of HTTPS health check resources.
"""
resources = self._iter_compute_resources('HttpsHealthCheck',
project_number)
for httpshealthcheck in resources:
yield httpshealthcheck
def iter_compute_ig_managers(self, project_number):
"""Iterate Instance Group Manager from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of instance group manager resources.
"""
cai_to_gcp_key_map = {
'namedPort': 'namedPorts',
'targetPool': 'targetPools',
}
resources = self._iter_compute_resources('InstanceGroupManager',
project_number)
for igmanager in resources:
yield _fixup_resource_keys(igmanager, cai_to_gcp_key_map)
def iter_compute_images(self, project_number):
"""Iterate Images from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of image resources.
"""
cai_to_gcp_key_map = {
'guestOsFeature': 'guestOsFeatures',
'license': 'licenses',
'licenseCode': 'licenseCodes',
}
resources = self._iter_compute_resources('Image', project_number)
for image in resources:
yield _fixup_resource_keys(image, cai_to_gcp_key_map)
def iter_compute_instancegroups(self, project_number):
"""Iterate Compute Engine groups from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Instance group.
"""
cai_to_gcp_key_map = {
'namedPort': 'namedPorts',
}
resources = self._iter_compute_resources('InstanceGroup',
project_number)
for instancegroup in resources:
yield _fixup_resource_keys(instancegroup, cai_to_gcp_key_map)
def iter_compute_instances(self, project_number):
"""Iterate compute engine instance from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Engine Instance resources.
"""
cai_to_gcp_key_map = {
'accessConfig': 'accessConfigs',
'aliasIpRange': 'aliasIpRanges',
'disk': 'disks',
'diskConfig': 'diskConfigs',
'guestAccelerator': 'guestAccelerators',
'guestOsFeature': 'guestOsFeatures',
'item': 'items',
'license': 'licenses',
'networkInterface': 'networkInterfaces',
'nodeAffinity': 'nodeAffinities',
'resourcePolicy': 'resourcePolicies',
'scope': 'scopes',
'serviceAccount': 'serviceAccounts',
'tag': 'tags',
}
resources = self._iter_compute_resources('Instance', project_number)
for instance in resources:
yield _fixup_resource_keys(instance, cai_to_gcp_key_map)
def iter_compute_instancetemplates(self, project_number):
"""Iterate Instance Templates from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of instance template resources.
"""
cai_to_gcp_key_map = {
'accessConfig': 'accessConfigs',
'aliasIpRange': 'aliasIpRanges',
'disk': 'disks',
'diskConfig': 'diskConfigs',
'guestAccelerator': 'guestAccelerators',
'guestOsFeature': 'guestOsFeatures',
'item': 'items',
'license': 'licenses',
'networkInterface': 'networkInterfaces',
'nodeAffinity': 'nodeAffinities',
'resourcePolicy': 'resourcePolicies',
'scope': 'scopes',
'serviceAccount': 'serviceAccounts',
'tag': 'tags',
}
resources = self._iter_compute_resources('InstanceTemplate',
project_number)
for instancetemplate in resources:
yield _fixup_resource_keys(instancetemplate, cai_to_gcp_key_map)
def iter_compute_licenses(self, project_number):
"""Iterate Licenses from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of license resources.
"""
resources = self._iter_compute_resources('License', project_number)
for compute_license in resources:
yield compute_license
def iter_compute_networks(self, project_number):
"""Iterate Networks from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of network resources.
"""
cai_to_gcp_key_map = {
'subnetwork': 'subnetworks',
}
resources = self._iter_compute_resources('Network', project_number)
for network in resources:
yield _fixup_resource_keys(network, cai_to_gcp_key_map)
def iter_compute_project(self, project_number):
"""Iterate Project from Cloud Asset data.
Will only ever return up to 1 result. Ensures compatibility with other
resource iterators.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of compute project resources.
"""
cai_to_gcp_key_map = {
'enabledFeature': 'enabledFeatures',
}
resources = self._iter_compute_resources('Project', project_number)
for project in resources:
yield _fixup_resource_keys(project, cai_to_gcp_key_map)
def iter_compute_routers(self, project_number):
"""Iterate Compute Engine routers from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Routers.
"""
cai_to_gcp_key_map = {
'advertisedGroup': 'advertisedGroups',
'advertisedIpRange': 'advertisedIpRanges',
'bgpPeer': 'bgpPeers',
'interface': 'interfaces',
}
resources = self._iter_compute_resources('Router', project_number)
for router in resources:
yield _fixup_resource_keys(router, cai_to_gcp_key_map)
def iter_compute_snapshots(self, project_number):
"""Iterate Compute Engine snapshots from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Snapshots.
"""
cai_to_gcp_key_map = {
'guestOsFeature': 'guestOsFeatures',
'license': 'licenses',
'licenseCode': 'licenseCodes',
}
resources = self._iter_compute_resources('Snapshot', project_number)
for snapshot in resources:
yield _fixup_resource_keys(snapshot, cai_to_gcp_key_map)
def iter_compute_sslcertificates(self, project_number):
"""Iterate SSL certificates from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of ssl certificate resources.
"""
resources = self._iter_compute_resources('SslCertificate',
project_number)
for sslcertificate in resources:
yield sslcertificate
def iter_compute_subnetworks(self, project_number):
"""Iterate Subnetworks from Cloud Asset data.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of subnetwork resources.
"""
resources = self._iter_compute_resources('Subnetwork',
project_number)
for subnetwork in resources:
yield subnetwork
def iter_compute_targethttpproxies(self, project_number):
    """Yield Target HTTP proxies for a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of target http proxy resources.
    """
    # No key fixups needed; pass the CAI resources straight through.
    for proxy in self._iter_compute_resources('TargetHttpProxy',
                                              project_number):
        yield proxy
def iter_compute_targethttpsproxies(self, project_number):
    """Yield Target HTTPS proxies for a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of target https proxy resources.
    """
    # Rename CAI's singular repeated-field key to the API's plural name.
    key_map = {
        'sslCertificate': 'sslCertificates',
    }
    for proxy in self._iter_compute_resources('TargetHttpsProxy',
                                              project_number):
        yield _fixup_resource_keys(proxy, key_map)
def iter_compute_targetinstances(self, project_number):
    """Yield Target Instances for a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of target instance resources.
    """
    # No key fixups needed; pass the CAI resources straight through.
    for instance in self._iter_compute_resources('TargetInstance',
                                                 project_number):
        yield instance
def iter_compute_targetpools(self, project_number):
    """Yield Target Pools for a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of target pool resources.
    """
    # Rename CAI's singular repeated-field keys to the API's plural names.
    key_map = {
        'healthCheck': 'healthChecks',
        'instance': 'instances',
    }
    for pool in self._iter_compute_resources('TargetPool', project_number):
        yield _fixup_resource_keys(pool, key_map)
def iter_compute_targetsslproxies(self, project_number):
    """Yield Target SSL proxies for a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of target ssl proxy resources.
    """
    # Rename CAI's singular repeated-field key to the API's plural name.
    key_map = {
        'sslCertificate': 'sslCertificates',
    }
    for proxy in self._iter_compute_resources('TargetSslProxy',
                                              project_number):
        yield _fixup_resource_keys(proxy, key_map)
def iter_compute_targettcpproxies(self, project_number):
    """Yield Target TCP proxies for a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of target tcp proxy resources.
    """
    # No key fixups needed; pass the CAI resources straight through.
    for proxy in self._iter_compute_resources('TargetTcpProxy',
                                              project_number):
        yield proxy
def iter_compute_targetvpngateways(self, project_number):
    """Iterate Target VPN Gateways from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of target vpn gateway resources.
    """
    # CAI stores repeated fields under singular key names; map them back
    # to the plural names used by the GCP API.
    cai_to_gcp_key_map = {
        'forwardingRule': 'forwardingRules',
        'tunnel': 'tunnels',
    }
    resources = self._iter_compute_resources('TargetVpnGateway',
                                             project_number)
    for targetvpngateway in resources:
        yield _fixup_resource_keys(targetvpngateway, cai_to_gcp_key_map)
def iter_compute_urlmaps(self, project_number):
    """Yield URL maps for a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of url map resources.
    """
    # Rename CAI's singular repeated-field keys to the API's plural names.
    key_map = {
        'host': 'hosts',
        'hostRule': 'hostRules',
        'path': 'paths',
        'pathMatcher': 'pathMatchers',
        'pathRule': 'pathRules',
        'test': 'tests'
    }
    for urlmap in self._iter_compute_resources('UrlMap', project_number):
        # 'path' can be singular when scalar or plural when a list, so
        # turn on only_fixup_lists, so the singular instance isn't munged.
        yield _fixup_resource_keys(urlmap, key_map, only_fixup_lists=True)
def iter_compute_vpntunnels(self, project_number):
    """Yield VPN tunnels for a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of vpn tunnel resources.
    """
    # No key fixups needed; pass the CAI resources straight through.
    for tunnel in self._iter_compute_resources('VpnTunnel', project_number):
        yield tunnel
def iter_container_clusters(self, project_number):
    """Yield Kubernetes Engine clusters for a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of Kubernetes Engine Cluster resources.
    """
    parent = '//cloudresourcemanager.googleapis.com/projects/{}'.format(
        project_number)
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.container.Cluster',
        parent,
        self.session)
    for cluster in assets:
        yield cluster
def fetch_crm_folder(self, folder_id):
    """Fetch a Folder resource from Cloud Asset data.

    Args:
        folder_id (str): id of the folder to query.

    Returns:
        dict: Folder resource.
    """
    folder = self.dao.fetch_cai_asset(
        ContentTypes.resource,
        'google.cloud.resourcemanager.Folder',
        '//cloudresourcemanager.googleapis.com/{}'.format(folder_id),
        self.session)
    if not folder:
        # Fall back to live API if the data isn't in the CAI cache.
        return super(CaiApiClientImpl, self).fetch_crm_folder(folder_id)
    return folder
def fetch_crm_folder_iam_policy(self, folder_id):
    """Fetch the IAM policy of a folder from Cloud Asset data.

    Args:
        folder_id (str): id of the folder to get policy.

    Returns:
        dict: Folder IAM policy.
    """
    policy = self.dao.fetch_cai_asset(
        ContentTypes.iam_policy,
        'google.cloud.resourcemanager.Folder',
        '//cloudresourcemanager.googleapis.com/{}'.format(folder_id),
        self.session)
    if not policy:
        # Fall back to live API if the data isn't in the CAI cache.
        return super(CaiApiClientImpl, self).fetch_crm_folder_iam_policy(
            folder_id)
    return policy
def fetch_crm_organization(self, org_id):
    """Fetch an Organization resource from Cloud Asset data.

    Args:
        org_id (str): id of the organization to get.

    Returns:
        dict: Organization resource.
    """
    org = self.dao.fetch_cai_asset(
        ContentTypes.resource,
        'google.cloud.resourcemanager.Organization',
        '//cloudresourcemanager.googleapis.com/{}'.format(org_id),
        self.session)
    if not org:
        # Fall back to live API if the data isn't in the CAI cache.
        return super(CaiApiClientImpl, self).fetch_crm_organization(org_id)
    return org
def fetch_crm_organization_iam_policy(self, org_id):
    """Fetch the IAM policy of an organization from Cloud Asset data.

    Args:
        org_id (str): id of the organization to get policy.

    Returns:
        dict: Organization IAM policy.
    """
    policy = self.dao.fetch_cai_asset(
        ContentTypes.iam_policy,
        'google.cloud.resourcemanager.Organization',
        '//cloudresourcemanager.googleapis.com/{}'.format(org_id),
        self.session)
    if not policy:
        # Fall back to live API if the data isn't in the CAI cache.
        return super(CaiApiClientImpl,
                     self).fetch_crm_organization_iam_policy(org_id)
    return policy
def fetch_crm_project(self, project_number):
    """Fetch a Project resource from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Returns:
        dict: Project resource.
    """
    project = self.dao.fetch_cai_asset(
        ContentTypes.resource,
        'google.cloud.resourcemanager.Project',
        '//cloudresourcemanager.googleapis.com/projects/{}'.format(
            project_number),
        self.session)
    if not project:
        # Fall back to live API if the data isn't in the CAI cache.
        return super(CaiApiClientImpl, self).fetch_crm_project(
            project_number)
    return project
def fetch_crm_project_iam_policy(self, project_number):
    """Fetch the IAM policy of a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Returns:
        dict: Project IAM Policy.
    """
    policy = self.dao.fetch_cai_asset(
        ContentTypes.iam_policy,
        'google.cloud.resourcemanager.Project',
        '//cloudresourcemanager.googleapis.com/projects/{}'.format(
            project_number),
        self.session)
    if not policy:
        # Fall back to live API if the data isn't in the CAI cache.
        return super(CaiApiClientImpl, self).fetch_crm_project_iam_policy(
            project_number)
    return policy
def iter_crm_folders(self, parent_id):
    """Yield Folders under a parent from Cloud Asset data.

    Args:
        parent_id (str): id of the parent of the folder

    Yields:
        dict: Generator of folders
    """
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.cloud.resourcemanager.Folder',
        '//cloudresourcemanager.googleapis.com/{}'.format(parent_id),
        self.session)
    for folder in assets:
        yield folder
def iter_crm_projects(self, parent_type, parent_id):
    """Yield Projects under a parent from Cloud Asset data.

    Args:
        parent_type (str): type of the parent, "folder" or "organization".
        parent_id (str): id of the parent of the folder.

    Yields:
        dict: Generator of Project resources
    """
    parent = '//cloudresourcemanager.googleapis.com/{}s/{}'.format(
        parent_type, parent_id)
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.cloud.resourcemanager.Project',
        parent,
        self.session)
    for project in assets:
        yield project
def fetch_dataproc_cluster_iam_policy(self, cluster):
    """Fetch a Dataproc Cluster IAM Policy from Cloud Asset data.

    Args:
        cluster (str): The Dataproc cluster to query, must be in the format
            projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}

    Returns:
        dict: Cluster IAM policy.
    """
    policy = self.dao.fetch_cai_asset(
        ContentTypes.iam_policy,
        'google.cloud.dataproc.Cluster',
        '//dataproc.googleapis.com/{}'.format(cluster),
        self.session)
    # Clusters with no IAM policy return an empty dict.
    return policy or {}
def iter_dataproc_clusters(self, project_id, region=None):
    """Iterate Dataproc clusters from Cloud Asset data.

    Args:
        project_id (str): id of the project to query.
        region (str): The region to query. Not required when using Cloud
            Asset API.

    Yields:
        dict: Generator of Cluster resources.
    """
    del region  # Used by API not CAI.
    resources = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.cloud.dataproc.Cluster',
        '//dataproc.googleapis.com/projects/{}'.format(project_id),
        self.session)
    for cluster in resources:
        yield cluster
def iter_dns_managedzones(self, project_number):
    """Yield CloudDNS Managed Zones for a project from Cloud Asset data.

    Args:
        project_number (str): number of the parent project.

    Yields:
        dict: Generator of ManagedZone resources
    """
    parent = '//cloudresourcemanager.googleapis.com/projects/{}'.format(
        project_number)
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.cloud.dns.ManagedZone',
        parent,
        self.session)
    for zone in assets:
        yield zone
def iter_dns_policies(self, project_number):
    """Iterate CloudDNS Policies from Cloud Asset data.

    Args:
        project_number (str): number of the parent project of the policy.

    Yields:
        dict: Generator of CloudDNS Policy resources
    """
    resources = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.cloud.dns.Policy',
        '//cloudresourcemanager.googleapis.com/projects/{}'.format(
            project_number),
        self.session)
    for policy in resources:
        yield policy
def fetch_gae_app(self, project_id):
    """Fetch the AppEngine App for a project from Cloud Asset data.

    Args:
        project_id (str): id of the project to query

    Returns:
        dict: AppEngine App resource.
    """
    # No fallback here; absent apps simply return None from the CAI cache.
    return self.dao.fetch_cai_asset(
        ContentTypes.resource,
        'google.appengine.Application',
        '//appengine.googleapis.com/apps/{}'.format(project_id),
        self.session)
def iter_gae_services(self, project_id):
    """Yield AppEngine services for a project from Cloud Asset data.

    Args:
        project_id (str): id of the project to query

    Yields:
        dict: Generator of AppEngine Service resources.
    """
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.appengine.Service',
        '//appengine.googleapis.com/apps/{}'.format(project_id),
        self.session)
    for service in assets:
        yield service
def iter_gae_versions(self, project_id, service_id):
    """Yield AppEngine versions of a service from Cloud Asset data.

    Args:
        project_id (str): id of the project to query
        service_id (str): id of the appengine service

    Yields:
        dict: Generator of AppEngine Version resources.
    """
    parent = '//appengine.googleapis.com/apps/{}/services/{}'.format(
        project_id, service_id)
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.appengine.Version',
        parent,
        self.session)
    for version in assets:
        yield version
def fetch_iam_serviceaccount_iam_policy(self, name, unique_id):
    """Fetch a Service Account IAM policy from Cloud Asset data.

    Args:
        name (str): The service account name to query, must be in the format
            projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}
        unique_id (str): The unique id of the service account.

    Returns:
        dict: Service Account IAM policy.
    """
    # CAI indexes iam policy by service account unique id, not email.
    # This transforms the name to the format expected by CAI.
    segments = name.split('/')
    segments[-1] = unique_id
    cai_name = '/'.join(segments)
    policy = self.dao.fetch_cai_asset(
        ContentTypes.iam_policy,
        'google.iam.ServiceAccount',
        '//iam.googleapis.com/{}'.format(cai_name),
        self.session)
    # Service accounts with no IAM policy return an empty dict.
    return policy or {}
def iter_iam_organization_roles(self, org_id):
    """Yield custom Organization roles from Cloud Asset data.

    Args:
        org_id (str): id of the organization to get.

    Yields:
        dict: Generator of organization role.
    """
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.iam.Role',
        '//cloudresourcemanager.googleapis.com/{}'.format(org_id),
        self.session)
    for role in assets:
        yield role
def iter_iam_project_roles(self, project_id, project_number):
    """Yield custom Project roles in a project from Cloud Asset data.

    Args:
        project_id (str): id of the project to query.
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of project roles.
    """
    del project_id  # Used by API not CAI.
    parent = '//cloudresourcemanager.googleapis.com/projects/{}'.format(
        project_number)
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.iam.Role',
        parent,
        self.session)
    for role in assets:
        yield role
def iter_iam_serviceaccounts(self, project_id, project_number):
    """Yield Service Accounts in a project from Cloud Asset data.

    Args:
        project_id (str): id of the project to query.
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of service account.
    """
    del project_id  # Used by API not CAI.
    parent = '//cloudresourcemanager.googleapis.com/projects/{}'.format(
        project_number)
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.iam.ServiceAccount',
        parent,
        self.session)
    for account in assets:
        yield account
def fetch_kms_cryptokey_iam_policy(self, cryptokey):
    """Fetch a KMS Cryptokey IAM Policy from Cloud Asset data.

    Args:
        cryptokey (str): The KMS cryptokey to query, must be in the format
            projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}/
            cryptoKeys/{CRYPTOKEY_NAME}

    Returns:
        dict: KMS Cryptokey IAM policy
    """
    policy = self.dao.fetch_cai_asset(
        ContentTypes.iam_policy,
        'google.cloud.kms.CryptoKey',
        '//cloudkms.googleapis.com/{}'.format(cryptokey),
        self.session)
    # Cryptokeys with no IAM policy return an empty dict.
    return policy or {}
def fetch_kms_keyring_iam_policy(self, keyring):
    """Fetch a KMS Keyring IAM Policy from Cloud Asset data.

    Args:
        keyring (str): The KMS keyring to query, must be in the format
            projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}

    Returns:
        dict: KMS Keyring IAM policy
    """
    policy = self.dao.fetch_cai_asset(
        ContentTypes.iam_policy,
        'google.cloud.kms.KeyRing',
        '//cloudkms.googleapis.com/{}'.format(keyring),
        self.session)
    # Keyrings with no IAM policy return an empty dict.
    return policy or {}
def iter_kms_cryptokeys(self, parent):
    """Yield KMS Cryptokeys in a keyring from Cloud Asset data.

    Args:
        parent (str): The KMS keyring to query, must be in the format
            projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}

    Yields:
        dict: Generator of KMS Cryptokey resources
    """
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.cloud.kms.CryptoKey',
        '//cloudkms.googleapis.com/{}'.format(parent),
        self.session)
    for cryptokey in assets:
        yield cryptokey
def iter_kms_cryptokeyversions(self, parent):
    """Yield KMS Cryptokey Versions from Cloud Asset data.

    Args:
        parent (str): The KMS keyring to query, must be in the format
            projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}/
            cryptoKeys/{CRYPTOKEY_NAME}

    Yields:
        dict: Generator of KMS Cryptokeyversion resources
    """
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.cloud.kms.CryptoKeyVersion',
        '//cloudkms.googleapis.com/{}'.format(parent),
        self.session)
    for version in assets:
        yield version
def iter_kms_keyrings(self, project_id, location=None):
    """Yield KMS Keyrings in a project from Cloud Asset data.

    Args:
        project_id (str): id of the project to query.
        location (str): The location to query. Not required when
            using Cloud Asset API.

    Yields:
        dict: Generator of KMS Keyring resources
    """
    del location  # Used by API not CAI.
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.cloud.kms.KeyRing',
        '//cloudkms.googleapis.com/projects/{}'.format(project_id),
        self.session)
    for keyring in assets:
        yield keyring
def fetch_pubsub_subscription_iam_policy(self, name):
    """PubSub Subscription IAM policy from Cloud Asset data.

    Args:
        name (str): The pubsub subscription to query, must be in the format
            projects/{PROJECT_ID}/subscriptions/{SUBSCRIPTION_NAME}

    Returns:
        dict: PubSub Subscription IAM policy
    """
    resource = self.dao.fetch_cai_asset(
        ContentTypes.iam_policy,
        'google.pubsub.Subscription',
        '//pubsub.googleapis.com/{}'.format(name),
        self.session)
    if resource:
        return resource
    # Subscriptions with no IAM policy return an empty dict.
    return {}
def fetch_pubsub_topic_iam_policy(self, name):
    """Fetch a PubSub Topic IAM policy from Cloud Asset data.

    Args:
        name (str): The pubsub topic to query, must be in the format
            projects/{PROJECT_ID}/topics/{TOPIC_NAME}

    Returns:
        dict: PubSub Topic IAM policy
    """
    policy = self.dao.fetch_cai_asset(
        ContentTypes.iam_policy,
        'google.pubsub.Topic',
        '//pubsub.googleapis.com/{}'.format(name),
        self.session)
    # Topics with no IAM policy return an empty dict.
    return policy or {}
def iter_pubsub_subscriptions(self, project_id, project_number):
    """Iterate PubSub subscriptions from Cloud Asset data.

    Args:
        project_id (str): id of the project to query.
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of Pubsub Subscription resources
    """
    del project_id  # Used by API not CAI.
    resources = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.pubsub.Subscription',
        '//cloudresourcemanager.googleapis.com/projects/{}'.format(
            project_number),
        self.session)
    for subscription in resources:
        yield subscription
def iter_pubsub_topics(self, project_id, project_number):
    """Yield PubSub topics in a project from Cloud Asset data.

    Args:
        project_id (str): id of the project to query.
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of Pubsub Topic resources
    """
    del project_id  # Used by API not CAI.
    parent = '//cloudresourcemanager.googleapis.com/projects/{}'.format(
        project_number)
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.pubsub.Topic',
        parent,
        self.session)
    for topic in assets:
        yield topic
def iter_spanner_instances(self, project_number):
    """Yield Spanner Instances in a project from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of Spanner Instance resources
    """
    parent = '//cloudresourcemanager.googleapis.com/projects/{}'.format(
        project_number)
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.spanner.Instance',
        parent,
        self.session)
    for instance in assets:
        yield instance
def iter_spanner_databases(self, parent):
    """Yield Spanner Databases of an instance from Cloud Asset data.

    Args:
        parent (str): parent spanner instance to query.

    Yields:
        dict: Generator of Spanner Database resources
    """
    assets = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.spanner.Database',
        '//spanner.googleapis.com/{}'.format(parent),
        self.session)
    for database in assets:
        yield database
def fetch_storage_bucket_acls(self, bucket_id, project_id, project_number):
    """Bucket Access Controls derived from the bucket IAM policy.

    The IAM policy is read from Cloud Asset data (with a live API
    fallback) and converted into legacy bucket ACL entries.

    Args:
        bucket_id (str): id of the bucket to query.
        project_id (str): id of the project to query.
        project_number (str): number of the project to query.

    Returns:
        list: Bucket Access Controls.
    """
    iam_policy = self.fetch_storage_bucket_iam_policy(bucket_id)
    if iam_policy:
        return iam_helpers.convert_iam_to_bucket_acls(iam_policy,
                                                      bucket_id,
                                                      project_id,
                                                      project_number)
    # Return empty list if IAM policy isn't present.
    return []
def fetch_storage_bucket_iam_policy(self, bucket_id):
    """Fetch a Bucket IAM policy from Cloud Asset data.

    Args:
        bucket_id (str): id of the bucket to query

    Returns:
        dict: Bucket IAM policy
    """
    policy = self.dao.fetch_cai_asset(
        ContentTypes.iam_policy,
        'google.cloud.storage.Bucket',
        '//storage.googleapis.com/{}'.format(bucket_id),
        self.session)
    if not policy:
        # Fall back to live API if the data isn't in the CAI cache.
        return super(CaiApiClientImpl,
                     self).fetch_storage_bucket_iam_policy(bucket_id)
    return policy
def iter_storage_buckets(self, project_number):
    """Iterate Buckets from Cloud Asset data.

    Args:
        project_number (str): number of the project to query.

    Yields:
        dict: Generator of buckets.
    """
    resources = self.dao.iter_cai_assets(
        ContentTypes.resource,
        'google.cloud.storage.Bucket',
        '//cloudresourcemanager.googleapis.com/projects/{}'.format(
            project_number),
        self.session)
    for bucket in resources:
        yield bucket
| 1 | 33,512 | Fixed this since it no longer needed to be switched from a generator to a list, this was missed when I cleaned up the dataset code in an earlier PR. | forseti-security-forseti-security | py |
@@ -70,7 +70,7 @@ public class NodeOptions {
Capabilities caps = info.getCanonicalCapabilities();
builders.stream()
.filter(builder -> builder.score(caps) > 0)
- .peek(builder -> LOG.info(String.format("Adding %s %d times", caps, info.getMaximumSimultaneousSessions())))
+ .peek(builder -> LOG.finest(String.format("Adding %s %d times", caps, info.getMaximumSimultaneousSessions())))
.forEach(builder -> {
DriverService.Builder freePortBuilder = builder.usingAnyFreePort();
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.node.config;
import io.opentracing.Tracer;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.WebDriverInfo;
import org.openqa.selenium.grid.config.Config;
import org.openqa.selenium.grid.node.local.LocalNode;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.service.DriverService;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.ServiceLoader;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
public class NodeOptions {
public static final Logger LOG = Logger.getLogger(NodeOptions.class.getName());
private final Config config;
public NodeOptions(Config config) {
this.config = Objects.requireNonNull(config);
}
public void configure(Tracer tracer, HttpClient.Factory httpClientFactory, LocalNode.Builder node) {
if (!config.getBool("node", "detect-drivers").orElse(false)) {
return;
}
addSystemDrivers(tracer, httpClientFactory, node);
}
private void addSystemDrivers(
Tracer tracer,
HttpClient.Factory clientFactory,
LocalNode.Builder node) {
// We don't expect duplicates, but they're fine
List<WebDriverInfo> infos =
StreamSupport.stream(ServiceLoader.load(WebDriverInfo.class).spliterator(), false)
.filter(WebDriverInfo::isAvailable)
.collect(Collectors.toList());
// Same
List<DriverService.Builder> builders = new ArrayList<>();
ServiceLoader.load(DriverService.Builder.class).forEach(builders::add);
infos.forEach(info -> {
Capabilities caps = info.getCanonicalCapabilities();
builders.stream()
.filter(builder -> builder.score(caps) > 0)
.peek(builder -> LOG.info(String.format("Adding %s %d times", caps, info.getMaximumSimultaneousSessions())))
.forEach(builder -> {
DriverService.Builder freePortBuilder = builder.usingAnyFreePort();
for (int i = 0; i < info.getMaximumSimultaneousSessions(); i++) {
node.add(
caps,
new DriverServiceSessionFactory(
tracer,
clientFactory, c -> freePortBuilder.score(c) > 0,
freePortBuilder));
}
});
});
}
}
| 1 | 17,124 | This is an informational message that allows someone to read the console output and understand how the grid node is configured. Please leave. | SeleniumHQ-selenium | java |
@@ -5,5 +5,17 @@ class ContributorFact < NameFact
.where(analysis_id: project.best_analysis_id)
.where.not(name_id: Position.where.not(name_id: nil).where(project_id: project.id).select(:name_id))
end
+
+ def first_for_name_id_and_project_id(name_id, project_id)
+ sql = <<-SQL
+ SELECT name_facts.* FROM name_facts
+ INNER JOIN projects ON projects.best_analysis_id = name_facts.analysis_id
+ WHERE ( name_facts.name_id = #{name_id.to_i} OR name_facts.name_id IN (
+ SELECT AA.preferred_name_id FROM analysis_aliases AA
+ WHERE AA.analysis_id = projects.best_analysis_id AND AA.commit_name_id = #{name_id.to_i}))
+ AND projects.id = #{project_id.to_i} AND type='ContributorFact'
+ SQL
+ ContributorFact.find_by_sql(sql).first
+ end
end
end | 1 | class ContributorFact < NameFact
class << self
def unclaimed_for_project(project)
ContributorFact.where.not(name_id: nil)
.where(analysis_id: project.best_analysis_id)
.where.not(name_id: Position.where.not(name_id: nil).where(project_id: project.id).select(:name_id))
end
end
end
| 1 | 7,038 | Can we convert this to an arel as below ContributorFact .joins(:project).where(projects: { id: project_id}) .where('name_id = ? or name_id in (?)', name_id, AnalysisAlias.select(:preferred_name_id) .joins(:project) .where(commit_name_id: name_id) ) | blackducksoftware-ohloh-ui | rb |
@@ -11,6 +11,12 @@ import (
type DevicesGroup struct {
}
+type CgroupParams struct {
+ initpid int
+}
+
+var cg CgroupParams
+
func (s *DevicesGroup) Name() string {
return "devices"
} | 1 | // +build linux
package fs
import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
)
type DevicesGroup struct {
}
func (s *DevicesGroup) Name() string {
return "devices"
}
func (s *DevicesGroup) Apply(d *cgroupData) error {
_, err := d.join("devices")
if err != nil {
// We will return error even it's `not found` error, devices
// cgroup is hard requirement for container's security.
return err
}
return nil
}
func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
if system.RunningInUserNS() {
return nil
}
devices := cgroup.Resources.Devices
if len(devices) > 0 {
for _, dev := range devices {
file := "devices.deny"
if dev.Allow {
file = "devices.allow"
}
if err := writeFile(path, file, dev.CgroupString()); err != nil {
return err
}
}
return nil
}
if !cgroup.Resources.AllowAllDevices {
if err := writeFile(path, "devices.deny", "a"); err != nil {
return err
}
for _, dev := range cgroup.Resources.AllowedDevices {
if err := writeFile(path, "devices.allow", dev.CgroupString()); err != nil {
return err
}
}
return nil
}
if err := writeFile(path, "devices.allow", "a"); err != nil {
return err
}
for _, dev := range cgroup.Resources.DeniedDevices {
if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil {
return err
}
}
return nil
}
func (s *DevicesGroup) Remove(d *cgroupData) error {
return removePath(d.path("devices"))
}
func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
return nil
}
| 1 | 9,879 | Does this cause issues if you have lots of load on the machine? Having a global variable for this seems like a bad idea IMO. | opencontainers-runc | go |
@@ -1374,7 +1374,15 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
privacyParametersBuilder.setEnabled(true);
privacyParametersBuilder.setEnclaveUrl(privacyUrl);
if (privacyPublicKeyFile() != null) {
- privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile());
+ try {
+ privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile());
+ } catch (final IOException e) {
+ throw new ParameterException(
+ commandLine, "Problem with privacy-public-key-file: " + e.getMessage(), e);
+ } catch (final IllegalArgumentException e) {
+ throw new ParameterException(
+ commandLine, "Contents of privacy-public-key-file invalid: " + e.getMessage(), e);
+ }
} else {
throw new ParameterException(
commandLine, "Please specify Enclave public key file path to enable privacy"); | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.hyperledger.besu.cli.DefaultCommandValues.getDefaultBesuDataPath;
import static org.hyperledger.besu.cli.config.NetworkName.MAINNET;
import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH;
import static org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration.DEFAULT_GRAPHQL_HTTP_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_JSON_RPC_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT;
import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PORT;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PUSH_PORT;
import org.hyperledger.besu.BesuInfo;
import org.hyperledger.besu.Runner;
import org.hyperledger.besu.RunnerBuilder;
import org.hyperledger.besu.chainimport.RlpBlockImporter;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.cli.config.NetworkName;
import org.hyperledger.besu.cli.converter.MetricCategoryConverter;
import org.hyperledger.besu.cli.converter.PercentageConverter;
import org.hyperledger.besu.cli.converter.RpcApisConverter;
import org.hyperledger.besu.cli.custom.CorsAllowedOriginsProperty;
import org.hyperledger.besu.cli.custom.JsonRPCWhitelistHostsProperty;
import org.hyperledger.besu.cli.custom.RpcAuthFileValidator;
import org.hyperledger.besu.cli.error.BesuExceptionHandler;
import org.hyperledger.besu.cli.options.EthProtocolOptions;
import org.hyperledger.besu.cli.options.MetricsCLIOptions;
import org.hyperledger.besu.cli.options.NetworkingOptions;
import org.hyperledger.besu.cli.options.PrunerOptions;
import org.hyperledger.besu.cli.options.SynchronizerOptions;
import org.hyperledger.besu.cli.options.TransactionPoolOptions;
import org.hyperledger.besu.cli.subcommands.PasswordSubCommand;
import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand;
import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand.KeyLoader;
import org.hyperledger.besu.cli.subcommands.RetestethSubCommand;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand.JsonBlockImporterFactory;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand.RlpBlockExporterFactory;
import org.hyperledger.besu.cli.subcommands.operator.OperatorSubCommand;
import org.hyperledger.besu.cli.subcommands.rlp.RLPSubCommand;
import org.hyperledger.besu.cli.util.BesuCommandCustomFactory;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.cli.util.ConfigOptionSearchAndRunHandler;
import org.hyperledger.besu.cli.util.VersionProvider;
import org.hyperledger.besu.config.GenesisConfigFile;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.controller.BesuControllerBuilder;
import org.hyperledger.besu.controller.KeyPairUtil;
import org.hyperledger.besu.enclave.EnclaveFactory;
import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration;
import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL;
import org.hyperledger.besu.ethereum.p2p.peers.StaticNodesParser;
import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfigurationBuilder;
import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.metrics.MetricCategoryRegistryImpl;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.StandardMetricCategory;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.metrics.prometheus.PrometheusMetricsSystem;
import org.hyperledger.besu.metrics.vertx.VertxMetricsAdapterFactory;
import org.hyperledger.besu.nat.NatMethod;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.BesuEvents;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.PicoCLIOptions;
import org.hyperledger.besu.plugin.services.StorageService;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import org.hyperledger.besu.plugin.services.metrics.MetricCategoryRegistry;
import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBPlugin;
import org.hyperledger.besu.services.BesuConfigurationImpl;
import org.hyperledger.besu.services.BesuEventsImpl;
import org.hyperledger.besu.services.BesuPluginContextImpl;
import org.hyperledger.besu.services.PicoCLIOptionsImpl;
import org.hyperledger.besu.services.StorageServiceImpl;
import org.hyperledger.besu.util.NetworkUtility;
import org.hyperledger.besu.util.PermissioningConfigurationValidator;
import org.hyperledger.besu.util.bytes.BytesValue;
import org.hyperledger.besu.util.number.Fraction;
import org.hyperledger.besu.util.number.PositiveNumber;
import org.hyperledger.besu.util.uint.UInt256;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Resources;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import io.vertx.core.json.DecodeException;
import io.vertx.core.metrics.MetricsOptions;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;
import picocli.CommandLine;
import picocli.CommandLine.AbstractParseResultHandler;
import picocli.CommandLine.Command;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParameterException;
/**
 * The top-level {@code besu} Picocli command. Declares every CLI option as an annotated field,
 * wires up subcommands and converters, and (in {@link #run()}) builds and starts the node.
 *
 * <p>Option fields are injected by Picocli at parse time; the {@code FieldCanBeLocal} warning is
 * suppressed because static analysis cannot see that injection.
 */
@SuppressWarnings("FieldCanBeLocal") // because Picocli injected fields report false positives
@Command(
    description = "This command runs the Besu Ethereum client full node.",
    abbreviateSynopsis = true,
    name = "besu",
    mixinStandardHelpOptions = true,
    versionProvider = VersionProvider.class,
    header = "Usage:",
    synopsisHeading = "%n",
    descriptionHeading = "%nDescription:%n%n",
    optionListHeading = "%nOptions:%n",
    footerHeading = "%n",
    footer = "Besu is licensed under the Apache License 2.0")
public class BesuCommand implements DefaultCommandValues, Runnable {
  // Collaborators injected through the constructor (see BesuCommand(...)).
  private final Logger logger;
  // Set once parse(...) builds the Picocli CommandLine; used for error reporting in setters.
  private CommandLine commandLine;
  private final RlpBlockImporter rlpBlockImporter;
  private final JsonBlockImporterFactory jsonBlockImporterFactory;
  private final RlpBlockExporterFactory rlpBlockExporterFactory;
  // Unstable/experimental option groups, surfaced via handleUnstableOptions().
  final NetworkingOptions networkingOptions = NetworkingOptions.create();
  final SynchronizerOptions synchronizerOptions = SynchronizerOptions.create();
  final EthProtocolOptions ethProtocolOptions = EthProtocolOptions.create();
  final MetricsCLIOptions metricsCLIOptions = MetricsCLIOptions.create();
  final TransactionPoolOptions transactionPoolOptions = TransactionPoolOptions.create();
  final PrunerOptions prunerOptions = PrunerOptions.create();
  private final RunnerBuilder runnerBuilder;
  private final BesuController.Builder controllerBuilderFactory;
  private final BesuPluginContextImpl besuPluginContext;
  private final StorageServiceImpl storageService;
  // Environment variables, consulted when resolving config-file defaults in parse(...).
  private final Map<String, String> environment;
  private final MetricCategoryRegistryImpl metricCategoryRegistry =
      new MetricCategoryRegistryImpl();
  private final MetricCategoryConverter metricCategoryConverter = new MetricCategoryConverter();
  /** Supplies the node-key loader used by the public-key subcommand; overridable in tests. */
  protected KeyLoader getKeyLoader() {
    return KeyPairUtil::loadKeyPair;
  }
  // Public IP stored to prevent having to research it each time we need it.
  private InetAddress autoDiscoveredDefaultIP = null;
  // Property to indicate whether Besu has been launched via docker
  private final boolean isDocker = Boolean.getBoolean("besu.docker");
  // CLI options defined by user at runtime.
  // Options parsing is done with CLI library Picocli https://picocli.info/
  @Option(
      names = "--identity",
      paramLabel = "<String>",
      description = "Identification for this node in the Client ID",
      arity = "1")
  private final Optional<String> identityString = Optional.empty();
  // Completely disables P2P within Besu.
  @Option(
      names = {"--p2p-enabled"},
      description = "Enable P2P functionality (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Boolean p2pEnabled = true;
  // Boolean option to indicate if peers should NOT be discovered, default to
  // false indicates that
  // the peers should be discovered by default.
  //
  // This negative option is required because of the nature of the option that is
  // true when
  // added on the command line. You can't do --option=false, so false is set as
  // default
  // and you have not to set the option at all if you want it false.
  // This seems to be the only way it works with Picocli.
  // Also many other software use the same negative option scheme for false
  // defaults
  // meaning that it's probably the right way to handle disabling options.
  @Option(
      names = {"--discovery-enabled"},
      description = "Enable P2P peer discovery (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Boolean peerDiscoveryEnabled = true;
  // A list of bootstrap nodes can be passed
  // and a hardcoded list will be used otherwise by the Runner.
  // NOTE: we have no control over default value here.
@Option(
names = {"--bootnodes"},
paramLabel = "<enode://id@host:port>",
description =
"Comma separated enode URLs for P2P discovery bootstrap. "
+ "Default is a predefined list.",
split = ",",
arity = "0..*")
void setBootnodes(final List<String> values) {
try {
bootNodes =
values.stream()
.filter(value -> !value.isEmpty())
.map(EnodeURL::fromString)
.collect(Collectors.toList());
DiscoveryConfiguration.assertValidBootnodes(bootNodes);
} catch (final IllegalArgumentException e) {
throw new ParameterException(commandLine, e.getMessage());
}
}
private List<EnodeURL> bootNodes = null;
  // --- Peer count / remote-connection throttling options ---
  @Option(
      names = {"--max-peers"},
      paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
      description =
          "Maximum P2P peer connections that can be established (default: ${DEFAULT-VALUE})")
  private final Integer maxPeers = DEFAULT_MAX_PEERS;
  @Option(
      names = {"--remote-connections-limit-enabled"},
      description =
          "Whether to limit the number of P2P connections initiated remotely. (default: ${DEFAULT-VALUE})")
  private final Boolean isLimitRemoteWireConnectionsEnabled = true;
  @Option(
      names = {"--remote-connections-max-percentage"},
      paramLabel = MANDATORY_DOUBLE_FORMAT_HELP,
      description =
          "The maximum percentage of P2P connections that can be initiated remotely. Must be between 0 and 100 inclusive. (default: ${DEFAULT-VALUE})",
      arity = "1",
      converter = PercentageConverter.class)
  // Default is derived from the fraction constant so the two representations stay in sync.
  private final Integer maxRemoteConnectionsPercentage =
      Fraction.fromFloat(DEFAULT_FRACTION_REMOTE_WIRE_CONNECTIONS_ALLOWED)
          .toPercentage()
          .getValue();
@Option(
names = {"--banned-node-ids", "--banned-node-id"},
paramLabel = MANDATORY_NODE_ID_FORMAT_HELP,
description = "A list of node IDs to ban from the P2P network.",
split = ",",
arity = "1..*")
void setBannedNodeIds(final List<String> values) {
try {
bannedNodeIds =
values.stream()
.filter(value -> !value.isEmpty())
.map(EnodeURL::parseNodeId)
.collect(Collectors.toList());
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Invalid ids supplied to '--banned-node-ids'. " + e.getMessage());
}
}
private Collection<BytesValue> bannedNodeIds = new ArrayList<>();
  // --- Synchronization and network selection ---
  @Option(
      names = {"--sync-mode"},
      paramLabel = MANDATORY_MODE_FORMAT_HELP,
      description =
          "Synchronization mode, possible values are ${COMPLETION-CANDIDATES} (default: ${DEFAULT-VALUE})")
  private final SyncMode syncMode = DEFAULT_SYNC_MODE;
  @Option(
      names = {"--fast-sync-min-peers"},
      paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
      description =
          "Minimum number of peers required before starting fast sync. (default: ${DEFAULT-VALUE})")
  private final Integer fastSyncMinPeerCount = FAST_SYNC_MIN_PEER_COUNT;
  // Null means "not set on the command line"; downstream config resolution picks MAINNET.
  @Option(
      names = {"--network"},
      paramLabel = MANDATORY_NETWORK_FORMAT_HELP,
      description =
          "Synchronize against the indicated network, possible values are ${COMPLETION-CANDIDATES}."
              + " (default: MAINNET)")
  private final NetworkName network = null;
  // --- P2P listening/advertising endpoints ---
  @SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
  @Option(
      names = {"--p2p-host"},
      paramLabel = MANDATORY_HOST_FORMAT_HELP,
      description = "Ip address this node advertises to its peers (default: ${DEFAULT-VALUE})",
      arity = "1")
  private String p2pHost = autoDiscoverDefaultIP().getHostAddress();
  @Option(
      names = {"--p2p-interface"},
      paramLabel = MANDATORY_HOST_FORMAT_HELP,
      description =
          "The network interface address on which this node listens for p2p communication (default: ${DEFAULT-VALUE})",
      arity = "1")
  private String p2pInterface = NetworkUtility.INADDR_ANY;
  @Option(
      names = {"--p2p-port"},
      paramLabel = MANDATORY_PORT_FORMAT_HELP,
      description = "Port on which to listen for p2p communication (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Integer p2pPort = EnodeURL.DEFAULT_LISTENING_PORT;
  @Option(
      names = {"--nat-method"},
      description =
          "Specify the NAT circumvention method to be used, possible values are ${COMPLETION-CANDIDATES}."
              + " NONE disables NAT functionality. (default: ${DEFAULT-VALUE})")
  private final NatMethod natMethod = DEFAULT_NAT_METHOD;
  @Option(
      names = {"--network-id"},
      paramLabel = "<BIG INTEGER>",
      description =
          "P2P network identifier. (default: the selected network chain ID or custom genesis chain ID)",
      arity = "1")
  private final BigInteger networkId = null;
  // --- GraphQL HTTP service ---
  @Option(
      names = {"--graphql-http-enabled"},
      description = "Set to start the GraphQL HTTP service (default: ${DEFAULT-VALUE})")
  private final Boolean isGraphQLHttpEnabled = false;
  @SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
  @Option(
      names = {"--graphql-http-host"},
      paramLabel = MANDATORY_HOST_FORMAT_HELP,
      description = "Host for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
      arity = "1")
  private String graphQLHttpHost = autoDiscoverDefaultIP().getHostAddress();
  @Option(
      names = {"--graphql-http-port"},
      paramLabel = MANDATORY_PORT_FORMAT_HELP,
      description = "Port for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Integer graphQLHttpPort = DEFAULT_GRAPHQL_HTTP_PORT;
  @Option(
      names = {"--graphql-http-cors-origins"},
      description = "Comma separated origin domain URLs for CORS validation (default: none)")
  private final CorsAllowedOriginsProperty graphQLHttpCorsAllowedOrigins =
      new CorsAllowedOriginsProperty();
  // --- JSON-RPC over HTTP ---
  @Option(
      names = {"--rpc-http-enabled"},
      description = "Set to start the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
  private final Boolean isRpcHttpEnabled = false;
  @SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
  @Option(
      names = {"--rpc-http-host"},
      paramLabel = MANDATORY_HOST_FORMAT_HELP,
      description = "Host for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
      arity = "1")
  private String rpcHttpHost = autoDiscoverDefaultIP().getHostAddress();
  @Option(
      names = {"--rpc-http-port"},
      paramLabel = MANDATORY_PORT_FORMAT_HELP,
      description = "Port for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Integer rpcHttpPort = DEFAULT_JSON_RPC_PORT;
  // A list of origins URLs that are accepted by the JsonRpcHttpServer (CORS)
  @Option(
      names = {"--rpc-http-cors-origins"},
      description = "Comma separated origin domain URLs for CORS validation (default: none)")
  private final CorsAllowedOriginsProperty rpcHttpCorsAllowedOrigins =
      new CorsAllowedOriginsProperty();
  @Option(
      names = {"--rpc-http-api", "--rpc-http-apis"},
      paramLabel = "<api name>",
      split = ",",
      arity = "1..*",
      converter = RpcApisConverter.class,
      description =
          "Comma separated list of APIs to enable on JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
  private final Collection<RpcApi> rpcHttpApis = DEFAULT_JSON_RPC_APIS;
  @Option(
      names = {"--rpc-http-authentication-enabled"},
      description =
          "Require authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
  private final Boolean isRpcHttpAuthenticationEnabled = false;
  // --- JSON-RPC over WebSocket ---
  @Option(
      names = {"--rpc-ws-enabled"},
      description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
  private final Boolean isRpcWsEnabled = false;
  @SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
  @Option(
      names = {"--rpc-ws-host"},
      paramLabel = MANDATORY_HOST_FORMAT_HELP,
      description = "Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
      arity = "1")
  private String rpcWsHost = autoDiscoverDefaultIP().getHostAddress();
  @Option(
      names = {"--rpc-ws-port"},
      paramLabel = MANDATORY_PORT_FORMAT_HELP,
      description = "Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT;
  @Option(
      names = {"--rpc-ws-api", "--rpc-ws-apis"},
      paramLabel = "<api name>",
      split = ",",
      arity = "1..*",
      converter = RpcApisConverter.class,
      description =
          "Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
  private final List<RpcApi> rpcWsApis = DEFAULT_JSON_RPC_APIS;
  @Option(
      names = {"--rpc-ws-authentication-enabled"},
      description =
          "Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
  private final Boolean isRpcWsAuthenticationEnabled = false;
  // --- Metrics (Prometheus pull and push modes) ---
  @Option(
      names = {"--metrics-enabled"},
      description = "Set to start the metrics exporter (default: ${DEFAULT-VALUE})")
  private final Boolean isMetricsEnabled = false;
  @SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
  @Option(
      names = {"--metrics-host"},
      paramLabel = MANDATORY_HOST_FORMAT_HELP,
      description = "Host for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
      arity = "1")
  private String metricsHost = autoDiscoverDefaultIP().getHostAddress();
  @Option(
      names = {"--metrics-port"},
      paramLabel = MANDATORY_PORT_FORMAT_HELP,
      description = "Port for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Integer metricsPort = DEFAULT_METRICS_PORT;
  @Option(
      names = {"--metrics-category", "--metrics-categories"},
      paramLabel = "<category name>",
      split = ",",
      arity = "1..*",
      description =
          "Comma separated list of categories to track metrics for (default: ${DEFAULT-VALUE})")
  private final Set<MetricCategory> metricCategories = DEFAULT_METRIC_CATEGORIES;
  @Option(
      names = {"--metrics-push-enabled"},
      description = "Enable the metrics push gateway integration (default: ${DEFAULT-VALUE})")
  private final Boolean isMetricsPushEnabled = false;
  @SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
  @Option(
      names = {"--metrics-push-host"},
      paramLabel = MANDATORY_HOST_FORMAT_HELP,
      description = "Host of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
      arity = "1")
  private String metricsPushHost = autoDiscoverDefaultIP().getHostAddress();
  @Option(
      names = {"--metrics-push-port"},
      paramLabel = MANDATORY_PORT_FORMAT_HELP,
      description = "Port of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Integer metricsPushPort = DEFAULT_METRICS_PUSH_PORT;
  @Option(
      names = {"--metrics-push-interval"},
      paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
      description =
          "Interval in seconds to push metrics when in push mode (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Integer metricsPushInterval = 15;
  @SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
  @Option(
      names = {"--metrics-push-prometheus-job"},
      description = "Job name to use when in push mode (default: ${DEFAULT-VALUE})",
      arity = "1")
  private String metricsPrometheusJob = "besu-client";
  // --- Access control and logging ---
  @Option(
      names = {"--host-whitelist"},
      paramLabel = "<hostname>[,<hostname>...]... or * or all",
      description =
          "Comma separated list of hostnames to whitelist for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})",
      defaultValue = "localhost,127.0.0.1")
  private final JsonRPCWhitelistHostsProperty hostsWhitelist = new JsonRPCWhitelistHostsProperty();
  @Option(
      names = {"--logging", "-l"},
      paramLabel = "<LOG VERBOSITY LEVEL>",
      description =
          "Logging verbosity levels: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL (default: ${DEFAULT-VALUE})")
  private final Level logLevel = LogManager.getRootLogger().getLevel();
  // --- Mining (including Stratum) ---
  @Option(
      names = {"--miner-enabled"},
      description = "Set if node will perform mining (default: ${DEFAULT-VALUE})")
  private final Boolean isMiningEnabled = false;
  // NOTE(review): field name looks like a typo for "isStratumMiningEnabled"; renaming would
  // touch callers outside this view, so it is left as-is.
  @Option(
      names = {"--miner-stratum-enabled"},
      description = "Set if node will perform Stratum mining (default: ${DEFAULT-VALUE})")
  private final Boolean iStratumMiningEnabled = false;
  @SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
  @Option(
      names = {"--miner-stratum-host"},
      description = "Host for Stratum network mining service (default: ${DEFAULT-VALUE})")
  private String stratumNetworkInterface = "0.0.0.0";
  @Option(
      names = {"--miner-stratum-port"},
      description = "Stratum port binding (default: ${DEFAULT-VALUE})")
  private final Integer stratumPort = 8008;
  @SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
  @Option(
      hidden = true,
      names = {"--Xminer-stratum-extranonce"},
      description = "Extranonce for Stratum network miners (default: ${DEFAULT-VALUE})")
  private String stratumExtranonce = "080c";
  @Option(
      names = {"--miner-coinbase"},
      description =
          "Account to which mining rewards are paid. You must specify a valid coinbase if "
              + "mining is enabled using --miner-enabled option",
      arity = "1")
  private final Address coinbase = null;
  @Option(
      names = {"--min-gas-price"},
      description =
          "Minimum price (in Wei) offered by a transaction for it to be included in a mined "
              + "block (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Wei minTransactionGasPrice = DEFAULT_MIN_TRANSACTION_GAS_PRICE;
  @Option(
      names = {"--miner-extra-data"},
      description =
          "A hex string representing the (32) bytes to be included in the extra data "
              + "field of a mined block (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final BytesValue extraData = DEFAULT_EXTRA_DATA;
  // Null (unset) means "derive the pruning default from the sync mode" downstream.
  @Option(
      names = {"--pruning-enabled"},
      description =
          "Enable disk-space saving optimization that removes old state that is unlikely to be required (default: true if fast sync is enabled, false otherwise)")
  private Boolean pruningOverride;
  // --- Permissioning (file-based and smart-contract-based) ---
  @Option(
      names = {"--permissions-nodes-config-file-enabled"},
      description = "Enable node level permissions (default: ${DEFAULT-VALUE})")
  private final Boolean permissionsNodesEnabled = false;
  @Option(
      names = {"--permissions-accounts-config-file-enabled"},
      description = "Enable account level permissions (default: ${DEFAULT-VALUE})")
  private final Boolean permissionsAccountsEnabled = false;
  @Option(
      names = {"--permissions-nodes-contract-address"},
      description = "Address of the node permissioning smart contract",
      arity = "1")
  private final Address permissionsNodesContractAddress = null;
  @Option(
      names = {"--permissions-nodes-contract-enabled"},
      description = "Enable node level permissions via smart contract (default: ${DEFAULT-VALUE})")
  private final Boolean permissionsNodesContractEnabled = false;
  @Option(
      names = {"--permissions-accounts-contract-address"},
      description = "Address of the account permissioning smart contract",
      arity = "1")
  private final Address permissionsAccountsContractAddress = null;
  @Option(
      names = {"--permissions-accounts-contract-enabled"},
      description =
          "Enable account level permissions via smart contract (default: ${DEFAULT-VALUE})")
  private final Boolean permissionsAccountsContractEnabled = false;
  // --- Privacy (Orion/enclave-backed private transactions) ---
  @Option(
      names = {"--privacy-enabled"},
      description = "Enable private transactions (default: ${DEFAULT-VALUE})")
  private final Boolean isPrivacyEnabled = false;
  @Option(
      names = {"--revert-reason-enabled"},
      description =
          "Enable passing the revert reason back through TransactionReceipts (default: ${DEFAULT-VALUE})")
  private final Boolean isRevertReasonEnabled = false;
  @Option(
      names = {"--required-blocks", "--required-block"},
      paramLabel = "BLOCK=HASH",
      description = "Block number and hash peers are required to have.",
      arity = "*",
      split = ",")
  private final Map<Long, Hash> requiredBlocks = new HashMap<>();
  @Option(
      names = {"--privacy-url"},
      description = "The URL on which the enclave is running")
  private final URI privacyUrl = PrivacyParameters.DEFAULT_ENCLAVE_URL;
  @Option(
      names = {"--privacy-precompiled-address"},
      description =
          "The address to which the privacy pre-compiled contract will be mapped to (default: ${DEFAULT-VALUE})")
  private final Integer privacyPrecompiledAddress = Address.PRIVACY;
  @Option(
      names = {"--privacy-marker-transaction-signing-key-file"},
      description =
          "The name of a file containing the private key used to sign privacy marker transactions. If unset, each will be signed with a random key.")
  private final Path privacyMarkerTransactionSigningKeyPath = null;
  // --- Gas, transaction pool, and storage ---
  @Option(
      names = {"--target-gas-limit"},
      description =
          "Sets target gas limit per block. If set each blocks gas limit will approach this setting over time if the current gas limit is different.")
  private final Long targetGasLimit = null;
  @Option(
      names = {"--tx-pool-max-size"},
      paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
      description =
          "Maximum number of pending transactions that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Integer txPoolMaxSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS;
  @Option(
      names = {"--tx-pool-retention-hours"},
      paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
      description =
          "Maximum retention period of pending transactions in hours (default: ${DEFAULT-VALUE})",
      arity = "1")
  private final Integer pendingTxRetentionPeriod =
      TransactionPoolConfiguration.DEFAULT_TX_RETENTION_HOURS;
  @SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
  @Option(
      names = {"--key-value-storage"},
      description = "Identity for the key-value storage to be used.",
      arity = "1")
  private String keyValueStorageName = DEFAULT_KEY_VALUE_STORAGE_NAME;
  // Case-insensitive so genesis keys match regardless of the capitalization the user typed.
  @Option(
      names = {"--override-genesis-config"},
      paramLabel = "NAME=VALUE",
      description = "Overrides configuration values in the genesis file. Use with care.",
      arity = "*",
      hidden = true,
      split = ",")
  private final Map<String, String> genesisConfigOverrides =
      new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
  // --- Mutable state populated while the command runs (not CLI options) ---
  private EthNetworkConfig ethNetworkConfig;
  private JsonRpcConfiguration jsonRpcConfiguration;
  private GraphQLConfiguration graphQLConfiguration;
  private WebSocketConfiguration webSocketConfiguration;
  private MetricsConfiguration metricsConfiguration;
  private Optional<PermissioningConfiguration> permissioningConfiguration;
  private Collection<EnodeURL> staticNodes;
  private BesuController<?> besuController;
  private StandaloneCommand standaloneCommands;
  private BesuConfiguration pluginCommonConfiguration;
  // Memoized so the metrics system is created at most once, after configuration is resolved.
  private final Supplier<ObservableMetricsSystem> metricsSystem =
      Suppliers.memoize(() -> PrometheusMetricsSystem.init(metricsConfiguration()));
  private Vertx vertx;
  /**
   * Production constructor: delegates to the full constructor with a freshly created
   * {@link StorageServiceImpl}.
   */
  public BesuCommand(
      final Logger logger,
      final RlpBlockImporter rlpBlockImporter,
      final JsonBlockImporterFactory jsonBlockImporterFactory,
      final RlpBlockExporterFactory rlpBlockExporterFactory,
      final RunnerBuilder runnerBuilder,
      final BesuController.Builder controllerBuilderFactory,
      final BesuPluginContextImpl besuPluginContext,
      final Map<String, String> environment) {
    this(
        logger,
        rlpBlockImporter,
        jsonBlockImporterFactory,
        rlpBlockExporterFactory,
        runnerBuilder,
        controllerBuilderFactory,
        besuPluginContext,
        environment,
        new StorageServiceImpl());
  }
@VisibleForTesting
protected BesuCommand(
final Logger logger,
final RlpBlockImporter rlpBlockImporter,
final JsonBlockImporterFactory jsonBlockImporterFactory,
final RlpBlockExporterFactory rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment,
final StorageServiceImpl storageService) {
this.logger = logger;
this.rlpBlockImporter = rlpBlockImporter;
this.rlpBlockExporterFactory = rlpBlockExporterFactory;
this.jsonBlockImporterFactory = jsonBlockImporterFactory;
this.runnerBuilder = runnerBuilder;
this.controllerBuilderFactory = controllerBuilderFactory;
this.besuPluginContext = besuPluginContext;
this.environment = environment;
this.storageService = storageService;
}
public void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final InputStream in,
final String... args) {
commandLine =
new CommandLine(this, new BesuCommandCustomFactory(besuPluginContext))
.setCaseInsensitiveEnumValuesAllowed(true);
handleStandaloneCommand()
.addSubCommands(resultHandler, in)
.registerConverters()
.handleUnstableOptions()
.preparePlugins()
.parse(resultHandler, exceptionHandler, args);
}
  /**
   * Runs the node after option parsing: configures logging, creates Vert.x (which needs the
   * parsed metrics configuration), validates options, builds the controller, starts plugins and
   * finally begins synchronization. Any failure is surfaced as a CLI {@link ParameterException}.
   */
  @Override
  public void run() {
    try {
      prepareLogging();
      logger.info("Starting Besu version: {}", BesuInfo.nodeName(identityString));
      // Need to create vertx after cmdline has been parsed, such that metricSystem is configurable
      vertx = createVertx(createVertxOptions(metricsSystem.get()));
      validateOptions().configure().controller().startPlugins().startSynchronization();
    } catch (final Exception e) {
      throw new ParameterException(this.commandLine, e.getMessage(), e);
    }
  }
private void addConfigurationService() {
if (pluginCommonConfiguration == null) {
final Path dataDir = dataDir();
pluginCommonConfiguration =
new BesuConfigurationImpl(dataDir, dataDir.resolve(DATABASE_PATH));
besuPluginContext.addService(BesuConfiguration.class, pluginCommonConfiguration);
}
}
  /** Test hook: pre-sets the plugin configuration so addConfigurationService() won't create one. */
  @VisibleForTesting
  void setBesuConfiguration(final BesuConfiguration pluginCommonConfiguration) {
    this.pluginCommonConfiguration = pluginCommonConfiguration;
  }
private BesuCommand handleStandaloneCommand() {
standaloneCommands = new StandaloneCommand();
if (isFullInstantiation()) {
commandLine.addMixin("standaloneCommands", standaloneCommands);
}
return this;
}
  /**
   * Registers every Besu subcommand (blocks, public-key, password, retesteth, rlp, operator)
   * on the command line. Registration order determines the order shown in help output.
   * Returns {@code this} for chaining.
   */
  private BesuCommand addSubCommands(
      final AbstractParseResultHandler<List<Object>> resultHandler, final InputStream in) {
    commandLine.addSubcommand(
        BlocksSubCommand.COMMAND_NAME,
        new BlocksSubCommand(
            rlpBlockImporter,
            jsonBlockImporterFactory,
            rlpBlockExporterFactory,
            resultHandler.out()));
    commandLine.addSubcommand(
        PublicKeySubCommand.COMMAND_NAME,
        new PublicKeySubCommand(resultHandler.out(), getKeyLoader()));
    commandLine.addSubcommand(
        PasswordSubCommand.COMMAND_NAME, new PasswordSubCommand(resultHandler.out()));
    commandLine.addSubcommand(RetestethSubCommand.COMMAND_NAME, new RetestethSubCommand());
    commandLine.addSubcommand(
        RLPSubCommand.COMMAND_NAME, new RLPSubCommand(resultHandler.out(), in));
    commandLine.addSubcommand(
        OperatorSubCommand.COMMAND_NAME, new OperatorSubCommand(resultHandler.out()));
    return this;
  }
  /**
   * Registers Picocli type converters for the domain types used by option fields, plus the
   * metric-category converter fed from both built-in category enums. Returns {@code this}.
   */
  private BesuCommand registerConverters() {
    commandLine.registerConverter(Address.class, Address::fromHexStringStrict);
    commandLine.registerConverter(BytesValue.class, BytesValue::fromHexString);
    commandLine.registerConverter(Level.class, Level::valueOf);
    commandLine.registerConverter(SyncMode.class, SyncMode::fromString);
    commandLine.registerConverter(UInt256.class, (arg) -> UInt256.of(new BigInteger(arg)));
    // NOTE(review): parseUnsignedLong caps CLI-supplied Wei values at 2^64-1 even though Wei is
    // a 256-bit quantity — confirm whether a BigInteger-based conversion is intended here.
    commandLine.registerConverter(Wei.class, (arg) -> Wei.of(Long.parseUnsignedLong(arg)));
    commandLine.registerConverter(PositiveNumber.class, PositiveNumber::fromString);
    commandLine.registerConverter(Hash.class, Hash::fromHexString);
    commandLine.registerConverter(Optional.class, Optional::of);
    metricCategoryConverter.addCategories(BesuMetricCategory.class);
    metricCategoryConverter.addCategories(StandardMetricCategory.class);
    commandLine.registerConverter(MetricCategory.class, metricCategoryConverter);
    return this;
  }
private BesuCommand handleUnstableOptions() {
// Add unstable options
final ImmutableMap.Builder<String, Object> unstableOptionsBuild = ImmutableMap.builder();
final ImmutableMap<String, Object> unstableOptions =
unstableOptionsBuild
.put("Ethereum Wire Protocol", ethProtocolOptions)
.put("Metrics", metricsCLIOptions)
.put("P2P Network", networkingOptions)
.put("Synchronizer", synchronizerOptions)
.put("TransactionPool", transactionPoolOptions)
.put("Pruner", prunerOptions)
.build();
UnstableOptionsSubCommand.createUnstableOptions(commandLine, unstableOptions);
return this;
}
private BesuCommand preparePlugins() {
besuPluginContext.addService(PicoCLIOptions.class, new PicoCLIOptionsImpl(commandLine));
besuPluginContext.addService(StorageService.class, storageService);
besuPluginContext.addService(MetricCategoryRegistry.class, metricCategoryRegistry);
// register built-in plugins
new RocksDBPlugin().register(besuPluginContext);
besuPluginContext.registerPlugins(pluginsDir());
metricCategoryRegistry
.getMetricCategories()
.forEach(metricCategoryConverter::addRegistryCategory);
return this;
}
private void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final String... args) {
// Create a handler that will search for a config file option and use it for
// default values
// and eventually it will run regular parsing of the remaining options.
final ConfigOptionSearchAndRunHandler configParsingHandler =
new ConfigOptionSearchAndRunHandler(
resultHandler, exceptionHandler, CONFIG_FILE_OPTION_NAME, environment, isDocker);
commandLine.parseWithHandlers(configParsingHandler, exceptionHandler, args);
}
private void startSynchronization() {
synchronize(
besuController,
p2pEnabled,
peerDiscoveryEnabled,
ethNetworkConfig,
maxPeers,
p2pHost,
p2pInterface,
p2pPort,
graphQLConfiguration,
jsonRpcConfiguration,
webSocketConfiguration,
metricsConfiguration,
permissioningConfiguration,
staticNodes);
}
private BesuCommand startPlugins() {
besuPluginContext.addService(
BesuEvents.class,
new BesuEventsImpl(
besuController.getProtocolContext().getBlockchain(),
besuController.getProtocolManager().getBlockBroadcaster(),
besuController.getTransactionPool(),
besuController.getSyncState()));
besuPluginContext.addService(MetricsSystem.class, getMetricsSystem());
besuController.getAdditionalPluginServices().appendPluginServices(besuPluginContext);
besuPluginContext.startPlugins();
return this;
}
private void prepareLogging() {
// set log level per CLI flags
if (logLevel != null) {
System.out.println("Setting logging level to " + logLevel.name());
Configurator.setAllLevels("", logLevel);
}
}
private BesuCommand validateOptions() {
issueOptionWarnings();
validateP2PInterface(p2pInterface);
validateMiningParams();
return this;
}
@SuppressWarnings("ConstantConditions")
private void validateMiningParams() {
if (isMiningEnabled && coinbase == null) {
throw new ParameterException(
this.commandLine,
"Unable to mine without a valid coinbase. Either disable mining (remove --miner-enabled)"
+ "or specify the beneficiary of mining (via --miner-coinbase <Address>)");
}
if (!isMiningEnabled && iStratumMiningEnabled) {
throw new ParameterException(
this.commandLine,
"Unable to mine with Stratum if mining is disabled. Either disable Stratum mining (remove --miner-stratum-enabled)"
+ "or specify mining is enabled (--miner-enabled)");
}
}
protected void validateP2PInterface(final String p2pInterface) {
final String failMessage = "The provided --p2p-interface is not available: " + p2pInterface;
try {
if (!NetworkUtility.isNetworkInterfaceAvailable(p2pInterface)) {
throw new ParameterException(commandLine, failMessage);
}
} catch (final UnknownHostException | SocketException e) {
throw new ParameterException(commandLine, failMessage, e);
}
}
private void issueOptionWarnings() {
// Check that P2P options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--p2p-enabled",
!p2pEnabled,
asList(
"--bootnodes",
"--discovery-enabled",
"--max-peers",
"--banned-node-id",
"--banned-node-ids",
"--p2p-host",
"--p2p-interface",
"--p2p-port",
"--remote-connections-max-percentage"));
// Check that mining options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--miner-enabled",
!isMiningEnabled,
asList(
"--miner-coinbase",
"--min-gas-price",
"--miner-extra-data",
"--miner-stratum-enabled"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--sync-mode",
!SyncMode.FAST.equals(syncMode),
singletonList("--fast-sync-min-peers"));
}
private BesuCommand configure() throws Exception {
ethNetworkConfig = updateNetworkConfig(getNetwork());
jsonRpcConfiguration = jsonRpcConfiguration();
graphQLConfiguration = graphQLConfiguration();
webSocketConfiguration = webSocketConfiguration();
permissioningConfiguration = permissioningConfiguration();
staticNodes = loadStaticNodes();
logger.info("Connecting to {} static nodes.", staticNodes.size());
logger.trace("Static Nodes = {}", staticNodes);
final List<URI> enodeURIs =
ethNetworkConfig.getBootNodes().stream().map(EnodeURL::toURI).collect(Collectors.toList());
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInWhitelist(enodeURIs, p));
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(
p ->
ensureAllNodesAreInWhitelist(
staticNodes.stream().map(EnodeURL::toURI).collect(Collectors.toList()), p));
metricsConfiguration = metricsConfiguration();
return this;
}
private NetworkName getNetwork() {
// noinspection ConstantConditions network is not always null but injected by
// PicoCLI if used
return network == null ? MAINNET : network;
}
private void ensureAllNodesAreInWhitelist(
final Collection<URI> enodeAddresses,
final LocalPermissioningConfiguration permissioningConfiguration) {
try {
PermissioningConfigurationValidator.areAllNodesAreInWhitelist(
enodeAddresses, permissioningConfiguration);
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage());
}
}
private BesuCommand controller() {
besuController = buildController();
return this;
}
public BesuController<?> buildController() {
try {
return getControllerBuilder().build();
} catch (final Exception e) {
throw new ExecutionException(this.commandLine, e.getMessage(), e);
}
}
public BesuControllerBuilder<?> getControllerBuilder() {
try {
addConfigurationService();
return controllerBuilderFactory
.fromEthNetworkConfig(updateNetworkConfig(getNetwork()), genesisConfigOverrides)
.synchronizerConfiguration(buildSyncConfig())
.ethProtocolConfiguration(ethProtocolOptions.toDomainObject())
.dataDirectory(dataDir())
.miningParameters(
new MiningParameters(
coinbase,
minTransactionGasPrice,
extraData,
isMiningEnabled,
iStratumMiningEnabled,
stratumNetworkInterface,
stratumPort,
stratumExtranonce))
.transactionPoolConfiguration(buildTransactionPoolConfiguration())
.nodePrivateKeyFile(nodePrivateKeyFile())
.metricsSystem(metricsSystem.get())
.privacyParameters(privacyParameters())
.clock(Clock.systemUTC())
.isRevertReasonEnabled(isRevertReasonEnabled)
.storageProvider(keyStorageProvider(keyValueStorageName))
.isPruningEnabled(isPruningEnabled())
.pruningConfiguration(prunerOptions.toDomainObject())
.genesisConfigOverrides(genesisConfigOverrides)
.targetGasLimit(targetGasLimit == null ? Optional.empty() : Optional.of(targetGasLimit))
.requiredBlocks(requiredBlocks);
} catch (final IOException e) {
throw new ExecutionException(this.commandLine, "Invalid path", e);
}
}
private GraphQLConfiguration graphQLConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--graphql-http-enabled",
!isRpcHttpEnabled,
asList("--graphql-http-cors-origins", "--graphql-http-host", "--graphql-http-port"));
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
graphQLConfiguration.setEnabled(isGraphQLHttpEnabled);
graphQLConfiguration.setHost(graphQLHttpHost);
graphQLConfiguration.setPort(graphQLHttpPort);
graphQLConfiguration.setHostsWhitelist(hostsWhitelist);
graphQLConfiguration.setCorsAllowedDomains(graphQLHttpCorsAllowedOrigins);
return graphQLConfiguration;
}
private JsonRpcConfiguration jsonRpcConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-enabled",
!isRpcHttpEnabled,
asList(
"--rpc-http-api",
"--rpc-http-apis",
"--rpc-http-cors-origins",
"--rpc-http-host",
"--rpc-http-port",
"--rpc-http-authentication-enabled",
"--rpc-http-authentication-credentials-file",
"--rpc-http-authentication-public-key-file"));
if (isRpcHttpAuthenticationEnabled
&& rpcHttpAuthenticationCredentialsFile() == null
&& rpcHttpAuthenticationPublicKeyFile() == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file or authentication public key file");
}
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
jsonRpcConfiguration.setEnabled(isRpcHttpEnabled);
jsonRpcConfiguration.setHost(rpcHttpHost);
jsonRpcConfiguration.setPort(rpcHttpPort);
jsonRpcConfiguration.setCorsAllowedDomains(rpcHttpCorsAllowedOrigins);
jsonRpcConfiguration.setRpcApis(rpcHttpApis.stream().distinct().collect(Collectors.toList()));
jsonRpcConfiguration.setHostsWhitelist(hostsWhitelist);
jsonRpcConfiguration.setAuthenticationEnabled(isRpcHttpAuthenticationEnabled);
jsonRpcConfiguration.setAuthenticationCredentialsFile(rpcHttpAuthenticationCredentialsFile());
jsonRpcConfiguration.setAuthenticationPublicKeyFile(rpcHttpAuthenticationPublicKeyFile());
return jsonRpcConfiguration;
}
private WebSocketConfiguration webSocketConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-enabled",
!isRpcWsEnabled,
asList(
"--rpc-ws-api",
"--rpc-ws-apis",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-authentication-enabled",
"--rpc-ws-authentication-credentials-file",
"--rpc-ws-authentication-public-key-file"));
if (isRpcWsAuthenticationEnabled
&& rpcWsAuthenticationCredentialsFile() == null
&& rpcWsAuthenticationPublicKeyFile() == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(isRpcWsEnabled);
webSocketConfiguration.setHost(rpcWsHost);
webSocketConfiguration.setPort(rpcWsPort);
webSocketConfiguration.setRpcApis(rpcWsApis);
webSocketConfiguration.setAuthenticationEnabled(isRpcWsAuthenticationEnabled);
webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile());
webSocketConfiguration.setHostsWhitelist(hostsWhitelist);
webSocketConfiguration.setAuthenticationPublicKeyFile(rpcWsAuthenticationPublicKeyFile());
return webSocketConfiguration;
}
public MetricsConfiguration metricsConfiguration() {
if (isMetricsEnabled && isMetricsPushEnabled) {
throw new ParameterException(
this.commandLine,
"--metrics-enabled option and --metrics-push-enabled option can't be used at the same "
+ "time. Please refer to CLI reference for more details about this constraint.");
}
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-enabled",
!isMetricsEnabled,
asList("--metrics-host", "--metrics-port"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-push-enabled",
!isMetricsPushEnabled,
asList(
"--metrics-push-host",
"--metrics-push-port",
"--metrics-push-interval",
"--metrics-push-prometheus-job"));
return metricsCLIOptions
.toDomainObject()
.enabled(isMetricsEnabled)
.host(metricsHost)
.port(metricsPort)
.metricCategories(metricCategories)
.pushEnabled(isMetricsPushEnabled)
.pushHost(metricsPushHost)
.pushPort(metricsPushPort)
.pushInterval(metricsPushInterval)
.hostsWhitelist(hostsWhitelist)
.prometheusJob(metricsPrometheusJob)
.build();
}
private Optional<PermissioningConfiguration> permissioningConfiguration() throws Exception {
if (!(localPermissionsEnabled() || contractPermissionsEnabled())) {
if (rpcHttpApis.contains(RpcApis.PERM) || rpcWsApis.contains(RpcApis.PERM)) {
logger.warn(
"Permissions are disabled. Cannot enable PERM APIs when not using Permissions.");
}
return Optional.empty();
}
final Optional<LocalPermissioningConfiguration> localPermissioningConfigurationOptional;
if (localPermissionsEnabled()) {
final Optional<String> nodePermissioningConfigFile =
Optional.ofNullable(nodePermissionsConfigFile());
final Optional<String> accountPermissioningConfigFile =
Optional.ofNullable(accountsPermissionsConfigFile());
final LocalPermissioningConfiguration localPermissioningConfiguration =
PermissioningConfigurationBuilder.permissioningConfiguration(
permissionsNodesEnabled,
nodePermissioningConfigFile.orElse(getDefaultPermissioningFilePath()),
permissionsAccountsEnabled,
accountPermissioningConfigFile.orElse(getDefaultPermissioningFilePath()));
localPermissioningConfigurationOptional = Optional.of(localPermissioningConfiguration);
} else {
if (nodePermissionsConfigFile() != null && !permissionsNodesEnabled) {
logger.warn(
"Node permissioning config file set {} but no permissions enabled",
nodePermissionsConfigFile());
}
if (accountsPermissionsConfigFile() != null && !permissionsAccountsEnabled) {
logger.warn(
"Account permissioning config file set {} but no permissions enabled",
accountsPermissionsConfigFile());
}
localPermissioningConfigurationOptional = Optional.empty();
}
final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration =
SmartContractPermissioningConfiguration.createDefault();
if (permissionsNodesContractEnabled) {
if (permissionsNodesContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No node permissioning contract address specified. Cannot enable smart contract based node permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractNodeWhitelistEnabled(
permissionsNodesContractEnabled);
smartContractPermissioningConfiguration.setNodeSmartContractAddress(
permissionsNodesContractAddress);
}
} else if (permissionsNodesContractAddress != null) {
logger.warn(
"Node permissioning smart contract address set {} but smart contract node permissioning is disabled.",
permissionsNodesContractAddress);
}
if (permissionsAccountsContractEnabled) {
if (permissionsAccountsContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No account permissioning contract address specified. Cannot enable smart contract based account permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractAccountWhitelistEnabled(
permissionsAccountsContractEnabled);
smartContractPermissioningConfiguration.setAccountSmartContractAddress(
permissionsAccountsContractAddress);
}
} else if (permissionsAccountsContractAddress != null) {
logger.warn(
"Account permissioning smart contract address set {} but smart contract account permissioning is disabled.",
permissionsAccountsContractAddress);
}
final PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
localPermissioningConfigurationOptional,
Optional.of(smartContractPermissioningConfiguration));
return Optional.of(permissioningConfiguration);
}
private boolean localPermissionsEnabled() {
return permissionsAccountsEnabled || permissionsNodesEnabled;
}
private boolean contractPermissionsEnabled() {
return permissionsNodesContractEnabled || permissionsAccountsContractEnabled;
}
private PrivacyParameters privacyParameters() throws IOException {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-enabled",
!isPrivacyEnabled,
asList("--privacy-url", "--privacy-public-key-file", "--privacy-precompiled-address"));
final PrivacyParameters.Builder privacyParametersBuilder = new PrivacyParameters.Builder();
if (isPrivacyEnabled) {
final String errorSuffix = "cannot be enabled with privacy.";
if (syncMode == SyncMode.FAST) {
throw new ParameterException(commandLine, String.format("%s %s", "Fast sync", errorSuffix));
}
if (isPruningEnabled()) {
throw new ParameterException(commandLine, String.format("%s %s", "Pruning", errorSuffix));
}
privacyParametersBuilder.setEnabled(true);
privacyParametersBuilder.setEnclaveUrl(privacyUrl);
if (privacyPublicKeyFile() != null) {
privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile());
} else {
throw new ParameterException(
commandLine, "Please specify Enclave public key file path to enable privacy");
}
privacyParametersBuilder.setPrivacyAddress(privacyPrecompiledAddress);
privacyParametersBuilder.setPrivateKeyPath(privacyMarkerTransactionSigningKeyPath);
privacyParametersBuilder.setStorageProvider(
privacyKeyStorageProvider(keyValueStorageName + "-privacy"));
privacyParametersBuilder.setEnclaveFactory(new EnclaveFactory(vertx));
} else {
if (anyPrivacyApiEnabled()) {
logger.warn("Privacy is disabled. Cannot use EEA/PRIV API methods when not using Privacy.");
}
}
return privacyParametersBuilder.build();
}
private boolean anyPrivacyApiEnabled() {
return rpcHttpApis.contains(RpcApis.EEA)
|| rpcWsApis.contains(RpcApis.EEA)
|| rpcHttpApis.contains(RpcApis.PRIV)
|| rpcWsApis.contains(RpcApis.PRIV);
}
private PrivacyKeyValueStorageProvider privacyKeyStorageProvider(final String name) {
return new PrivacyKeyValueStorageProviderBuilder()
.withStorageFactory(
(PrivacyKeyValueStorageFactory)
storageService
.getByName(name)
.orElseThrow(
() ->
new StorageException(
"No KeyValueStorageFactory found for key: " + name)))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
private KeyValueStorageProvider keyStorageProvider(final String name) {
return new KeyValueStorageProviderBuilder()
.withStorageFactory(
storageService
.getByName(name)
.orElseThrow(
() -> new StorageException("No KeyValueStorageFactory found for key: " + name)))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
private SynchronizerConfiguration buildSyncConfig() {
return synchronizerOptions
.toDomainObject()
.syncMode(syncMode)
.fastSyncMinimumPeerCount(fastSyncMinPeerCount)
.build();
}
private TransactionPoolConfiguration buildTransactionPoolConfiguration() {
return transactionPoolOptions
.toDomainObject()
.txPoolMaxSize(txPoolMaxSize)
.pendingTxRetentionPeriod(pendingTxRetentionPeriod)
.build();
}
private boolean isPruningEnabled() {
return Optional.ofNullable(pruningOverride).orElse(syncMode == SyncMode.FAST);
}
// Blockchain synchronisation from peers.
private void synchronize(
final BesuController<?> controller,
final boolean p2pEnabled,
final boolean peerDiscoveryEnabled,
final EthNetworkConfig ethNetworkConfig,
final int maxPeers,
final String p2pAdvertisedHost,
final String p2pListenInterface,
final int p2pListenPort,
final GraphQLConfiguration graphQLConfiguration,
final JsonRpcConfiguration jsonRpcConfiguration,
final WebSocketConfiguration webSocketConfiguration,
final MetricsConfiguration metricsConfiguration,
final Optional<PermissioningConfiguration> permissioningConfiguration,
final Collection<EnodeURL> staticNodes) {
checkNotNull(runnerBuilder);
permissioningConfiguration.ifPresent(runnerBuilder::permissioningConfiguration);
final ObservableMetricsSystem metricsSystem = this.metricsSystem.get();
final Runner runner =
runnerBuilder
.vertx(vertx)
.besuController(controller)
.p2pEnabled(p2pEnabled)
.natMethod(natMethod)
.discovery(peerDiscoveryEnabled)
.ethNetworkConfig(ethNetworkConfig)
.p2pAdvertisedHost(p2pAdvertisedHost)
.p2pListenInterface(p2pListenInterface)
.p2pListenPort(p2pListenPort)
.maxPeers(maxPeers)
.limitRemoteWireConnectionsEnabled(isLimitRemoteWireConnectionsEnabled)
.fractionRemoteConnectionsAllowed(
Fraction.fromPercentage(maxRemoteConnectionsPercentage).getValue())
.networkingConfiguration(networkingOptions.toDomainObject())
.graphQLConfiguration(graphQLConfiguration)
.jsonRpcConfiguration(jsonRpcConfiguration)
.webSocketConfiguration(webSocketConfiguration)
.dataDir(dataDir())
.bannedNodeIds(bannedNodeIds)
.metricsSystem(metricsSystem)
.metricsConfiguration(metricsConfiguration)
.staticNodes(staticNodes)
.identityString(identityString)
.build();
addShutdownHook(runner);
runner.start();
runner.awaitStop();
}
protected Vertx createVertx(final VertxOptions vertxOptions) {
return Vertx.vertx(vertxOptions);
}
private VertxOptions createVertxOptions(final MetricsSystem metricsSystem) {
return new VertxOptions()
.setMetricsOptions(
new MetricsOptions()
.setEnabled(true)
.setFactory(new VertxMetricsAdapterFactory(metricsSystem)));
}
private void addShutdownHook(final Runner runner) {
Runtime.getRuntime()
.addShutdownHook(
new Thread(
() -> {
try {
besuPluginContext.stopPlugins();
runner.close();
LogManager.shutdown();
} catch (final Exception e) {
logger.error("Failed to stop Besu");
}
}));
}
// Used to discover the default IP of the client.
// Loopback IP is used by default as this is how smokeTests require it to be
// and it's probably a good security behaviour to default only on the localhost.
private InetAddress autoDiscoverDefaultIP() {
if (autoDiscoveredDefaultIP != null) {
return autoDiscoveredDefaultIP;
}
autoDiscoveredDefaultIP = InetAddress.getLoopbackAddress();
return autoDiscoveredDefaultIP;
}
private EthNetworkConfig updateNetworkConfig(final NetworkName network) {
final EthNetworkConfig.Builder builder =
new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(network));
// custom genesis file use comes with specific default values for the genesis
// file itself
// but also for the network id and the bootnodes list.
final File genesisFile = genesisFile();
if (genesisFile != null) {
// noinspection ConstantConditions network is not always null but injected by
// PicoCLI if used
if (this.network != null) {
// We check if network option was really provided by user and not only looking
// at the
// default value.
// if user provided it and provided the genesis file option at the same time, it
// raises a
// conflict error
throw new ParameterException(
this.commandLine,
"--network option and --genesis-file option can't be used at the same time. Please "
+ "refer to CLI reference for more details about this constraint.");
}
builder.setGenesisConfig(genesisConfig());
if (networkId == null) {
// if no network id option is defined on the CLI we have to set a default value
// from the
// genesis file.
// We do the genesis parsing only in this case as we already have network id
// constants
// for known networks to speed up the process.
// Also we have to parse the genesis as we don't already have a parsed version
// at this
// stage.
// If no chain id is found in the genesis as it's an optional, we use mainnet
// network id.
try {
final GenesisConfigFile genesisConfigFile = GenesisConfigFile.fromConfig(genesisConfig());
builder.setNetworkId(
genesisConfigFile
.getConfigOptions(genesisConfigOverrides)
.getChainId()
.orElse(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId()));
} catch (final DecodeException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to parse genesis file %s.", genesisFile), e);
} catch (final ArithmeticException e) {
throw new ParameterException(
this.commandLine,
"No networkId specified and chainId in "
+ "genesis file is too large to be used as a networkId");
}
}
if (bootNodes == null) {
// We default to an empty bootnodes list if the option is not provided on CLI
// because
// mainnet bootnodes won't work as the default value for a custom genesis,
// so it's better to have an empty list as default value that forces to create a
// custom one
// than a useless one that may make user think that it can work when it can't.
builder.setBootNodes(new ArrayList<>());
}
}
if (networkId != null) {
builder.setNetworkId(networkId);
}
if (bootNodes != null) {
builder.setBootNodes(bootNodes);
}
return builder.build();
}
private String genesisConfig() {
try {
return Resources.toString(genesisFile().toURI().toURL(), UTF_8);
} catch (final IOException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to load genesis file %s.", genesisFile()), e);
}
}
private File genesisFile() {
if (isFullInstantiation()) {
return standaloneCommands.genesisFile;
} else if (isDocker) {
final File genesisFile = new File(DOCKER_GENESIS_LOCATION);
if (genesisFile.exists()) {
return genesisFile;
} else {
return null;
}
} else {
return null;
}
}
public Path dataDir() {
if (isFullInstantiation()) {
return standaloneCommands.dataPath.toAbsolutePath();
} else if (isDocker) {
return Paths.get(DOCKER_DATADIR_LOCATION);
} else {
return getDefaultBesuDataPath(this);
}
}
private Path pluginsDir() {
if (isFullInstantiation()) {
final String pluginsDir = System.getProperty("besu.plugins.dir");
if (pluginsDir == null) {
return new File(System.getProperty("besu.home", "."), "plugins").toPath();
} else {
return new File(pluginsDir).toPath();
}
} else if (isDocker) {
return Paths.get(DOCKER_PLUGINSDIR_LOCATION);
} else {
return null; // null means no plugins
}
}
public File nodePrivateKeyFile() {
File nodePrivateKeyFile = null;
if (isFullInstantiation()) {
nodePrivateKeyFile = standaloneCommands.nodePrivateKeyFile;
}
return nodePrivateKeyFile != null
? nodePrivateKeyFile
: KeyPairUtil.getDefaultKeyFile(dataDir());
}
private File privacyPublicKeyFile() {
if (isDocker) {
final File keyFile = new File(DOCKER_PRIVACY_PUBLIC_KEY_FILE);
if (keyFile.exists()) {
return keyFile;
} else {
return null;
}
} else {
return standaloneCommands.privacyPublicKeyFile;
}
}
private String rpcHttpAuthenticationCredentialsFile() {
String filename = null;
if (isFullInstantiation()) {
filename = standaloneCommands.rpcHttpAuthenticationCredentialsFile;
} else if (isDocker) {
final File authFile = new File(DOCKER_RPC_HTTP_AUTHENTICATION_CREDENTIALS_FILE_LOCATION);
if (authFile.exists()) {
filename = authFile.getAbsolutePath();
}
}
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "HTTP");
}
return filename;
}
private String rpcWsAuthenticationCredentialsFile() {
String filename = null;
if (isFullInstantiation()) {
filename = standaloneCommands.rpcWsAuthenticationCredentialsFile;
} else if (isDocker) {
final File authFile = new File(DOCKER_RPC_WS_AUTHENTICATION_CREDENTIALS_FILE_LOCATION);
if (authFile.exists()) {
filename = authFile.getAbsolutePath();
}
}
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "WS");
}
return filename;
}
private File rpcHttpAuthenticationPublicKeyFile() {
if (isDocker) {
final File keyFile = new File(DOCKER_RPC_HTTP_AUTHENTICATION_PUBLIC_KEY_FILE_LOCATION);
return keyFile.exists() ? keyFile : null;
} else {
return standaloneCommands.rpcHttpAuthenticationPublicKeyFile;
}
}
private File rpcWsAuthenticationPublicKeyFile() {
if (isDocker) {
final File keyFile = new File(DOCKER_RPC_WS_AUTHENTICATION_PUBLIC_KEY_FILE_LOCATION);
return keyFile.exists() ? keyFile : null;
} else {
return standaloneCommands.rpcWsAuthenticationPublicKeyFile;
}
}
private String nodePermissionsConfigFile() {
return permissionsConfigFile(standaloneCommands.nodePermissionsConfigFile);
}
private String accountsPermissionsConfigFile() {
return permissionsConfigFile(standaloneCommands.accountPermissionsConfigFile);
}
private String permissionsConfigFile(final String permissioningFilename) {
String filename = null;
if (isFullInstantiation()) {
filename = permissioningFilename;
} else if (isDocker) {
final File file = new File(DOCKER_PERMISSIONS_CONFIG_FILE_LOCATION);
if (file.exists()) {
filename = file.getAbsolutePath();
}
}
return filename;
}
private String getDefaultPermissioningFilePath() {
return dataDir().toAbsolutePath()
+ System.getProperty("file.separator")
+ DefaultCommandValues.PERMISSIONING_CONFIG_LOCATION;
}
private boolean isFullInstantiation() {
return !isDocker;
}
public MetricsSystem getMetricsSystem() {
return metricsSystem.get();
}
private Set<EnodeURL> loadStaticNodes() throws IOException {
final String staticNodesFilename = "static-nodes.json";
final Path staticNodesPath = dataDir().resolve(staticNodesFilename);
return StaticNodesParser.fromPath(staticNodesPath);
}
public BesuExceptionHandler exceptionHandler() {
return new BesuExceptionHandler(this::getLogLevel);
}
private Level getLogLevel() {
return logLevel;
}
}
| 1 | 20,776 | We probably don't need to print the msg with e.getMessage(). It will be printed by the logger since we are passing the exception as a parameter. | hyperledger-besu | java |
@@ -29,17 +29,6 @@ from molecule.verifier import testinfra
from molecule.verifier.lint import flake8
[email protected]
-def _patched_testinfra_get_tests(mocker):
- m = mocker.patch('molecule.verifier.testinfra.Testinfra._get_tests')
- m.return_value = [
- 'foo.py',
- 'bar.py',
- ]
-
- return m
-
-
@pytest.fixture
def _verifier_section_data():
return { | 1 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import pytest
import sh
from molecule import config
from molecule import util
from molecule.verifier import testinfra
from molecule.verifier.lint import flake8
@pytest.fixture
def _patched_testinfra_get_tests(mocker):
m = mocker.patch('molecule.verifier.testinfra.Testinfra._get_tests')
m.return_value = [
'foo.py',
'bar.py',
]
return m
@pytest.fixture
def _verifier_section_data():
return {
'verifier': {
'name':
'testinfra',
'options': {
'foo': 'bar',
'v': True,
'verbose': True,
},
'additional_files_or_dirs': [
'file1.py',
'file2.py',
'match*.py',
'dir/*',
],
'env': {
'FOO': 'bar',
},
'lint': {
'name': 'flake8',
},
}
}
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture
def _instance(patched_config_validate, config_instance):
return testinfra.Testinfra(config_instance)
@pytest.fixture
def inventory_file(_instance):
return _instance._config.provisioner.inventory_file
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(inventory_file, _instance):
x = {
'connection': 'ansible',
'ansible-inventory': inventory_file,
'p': 'no:cacheprovider',
}
assert x == _instance.default_options
def test_default_options_property_updates_debug(inventory_file, _instance):
_instance._config.args = {'debug': True}
x = {
'connection': 'ansible',
'ansible-inventory': inventory_file,
'debug': True,
'vvv': True,
'p': 'no:cacheprovider',
}
assert x == _instance.default_options
def test_default_options_property_updates_sudo(inventory_file, _instance,
_patched_testinfra_get_tests):
_instance._config.args = {'sudo': True}
x = {
'connection': 'ansible',
'ansible-inventory': inventory_file,
'sudo': True,
'p': 'no:cacheprovider',
}
assert x == _instance.default_options
def test_default_env_property(_instance):
    """All molecule bookkeeping variables appear in the default env."""
    env = _instance.default_env
    for key in ('MOLECULE_FILE', 'MOLECULE_INVENTORY_FILE',
                'MOLECULE_SCENARIO_DIRECTORY', 'MOLECULE_INSTANCE_CONFIG'):
        assert key in env
@pytest.mark.parametrize(
    'config_instance', ['_verifier_section_data'], indirect=True)
def test_additional_files_or_dirs_property(_instance):
    """Globs and directory entries in the config expand to concrete files."""
    tests_directory = _instance._config.verifier.directory
    test_subdir = os.path.join(tests_directory, 'dir')
    expected = [
        os.path.join(tests_directory, 'file1.py'),
        os.path.join(tests_directory, 'file2.py'),
        os.path.join(tests_directory, 'match1.py'),
        os.path.join(tests_directory, 'match2.py'),
        os.path.join(test_subdir, 'test_subdir_file.py'),
    ]
    os.mkdir(tests_directory)
    os.mkdir(test_subdir)
    for path in expected:
        util.write_file(path, '')
    assert sorted(expected) == sorted(_instance.additional_files_or_dirs)
@pytest.mark.parametrize(
    'config_instance', ['_verifier_section_data'], indirect=True)
def test_env_property(_instance):
    """User env vars merge with the ansible path variables."""
    env = _instance.env
    assert 'bar' == env['FOO']
    for key in ('ANSIBLE_CONFIG', 'ANSIBLE_ROLES_PATH',
                'ANSIBLE_LIBRARY', 'ANSIBLE_FILTER_PLUGINS'):
        assert key in env
def test_lint_property(_instance):
    """The configured lint backend is a Flake8 instance."""
    assert isinstance(_instance.lint, flake8.Flake8)
def test_name_property(_instance):
    """The verifier reports its name as testinfra."""
    assert _instance.name == 'testinfra'
def test_enabled_property(_instance):
    """The verifier is enabled by default."""
    assert _instance.enabled
def test_directory_property(_instance):
    """The tests directory lives under molecule/<scenario>/tests."""
    tail = _instance.directory.split(os.path.sep)[-3:]
    assert ['molecule', 'default', 'tests'] == tail
@pytest.fixture
def _verifier_testinfra_directory_section_data():
    """Config fragment overriding the verifier tests directory."""
    return {'verifier': {'name': 'testinfra', 'directory': '/tmp/foo/bar'}}
@pytest.mark.parametrize(
    'config_instance', ['_verifier_testinfra_directory_section_data'],
    indirect=True)
def test_directory_property_overriden(_instance):
    """An explicit `directory` key overrides the default tests location."""
    assert _instance.directory == '/tmp/foo/bar'
@pytest.mark.parametrize(
    'config_instance', ['_verifier_section_data'], indirect=True)
def test_options_property(inventory_file, _instance):
    """User options merge on top of the default options."""
    expected = {
        'connection': 'ansible',
        'ansible-inventory': inventory_file,
        'foo': 'bar',
        'v': True,
        'verbose': True,
        'p': 'no:cacheprovider',
    }
    assert expected == _instance.options
@pytest.mark.parametrize(
    'config_instance', ['_verifier_section_data'], indirect=True)
def test_options_property_handles_cli_args(inventory_file, _instance):
    """CLI flags are merged with user options when building options."""
    _instance._config.args = {'debug': True}
    expected = {
        'connection': 'ansible',
        'ansible-inventory': inventory_file,
        'foo': 'bar',
        'debug': True,
        'vvv': True,
        'verbose': True,
        'p': 'no:cacheprovider',
    }
    assert expected == _instance.options
@pytest.mark.parametrize(
    'config_instance', ['_verifier_section_data'], indirect=True)
def test_bake(_patched_testinfra_get_tests, inventory_file, _instance):
    """Baking assembles the py.test command line from options and tests."""
    tests_directory = _instance._config.verifier.directory
    file1_file = os.path.join(tests_directory, 'file1.py')
    os.mkdir(tests_directory)
    util.write_file(file1_file, '')
    _instance.bake()
    expected = [
        str(sh.Command('py.test')),
        '--ansible-inventory={}'.format(inventory_file),
        '--connection=ansible',
        '-v',
        '--foo=bar',
        'foo.py',
        'bar.py',
        '-p',
        'no:cacheprovider',
        file1_file,
    ]
    actual = str(_instance._testinfra_command).split()
    assert sorted(expected) == sorted(actual)
def test_execute(patched_logger_info, patched_run_command,
                 _patched_testinfra_get_tests, patched_logger_success,
                 _instance):
    """A successful run logs, executes the baked command and reports success."""
    _instance._testinfra_command = 'patched-command'
    _instance.execute()
    patched_run_command.assert_called_once_with('patched-command', debug=False)
    patched_logger_info.assert_called_once_with(
        'Executing Testinfra tests found in {}/...'.format(
            _instance.directory))
    patched_logger_success.assert_called_once_with(
        'Verifier completed successfully.')
def test_execute_does_not_execute(patched_run_command, patched_logger_warn,
                                  _instance):
    """Nothing runs when the verifier is disabled; a warning is logged."""
    _instance._config.config['verifier']['enabled'] = False
    _instance.execute()
    assert not patched_run_command.called
    patched_logger_warn.assert_called_once_with(
        'Skipping, verifier is disabled.')
def test_does_not_execute_without_tests(patched_run_command,
                                        patched_logger_warn, _instance):
    """Nothing runs when no test files were discovered."""
    _instance.execute()
    assert not patched_run_command.called
    patched_logger_warn.assert_called_once_with('Skipping, no tests found.')
def test_execute_bakes(patched_run_command, _patched_testinfra_get_tests,
                       _instance):
    """Execute bakes the command implicitly when none was baked before."""
    _instance.execute()
    assert _instance._testinfra_command is not None
    assert 1 == patched_run_command.call_count
def test_executes_catches_and_exits_return_code(
        patched_run_command, _patched_testinfra_get_tests, _instance):
    """A failing testinfra run exits molecule with the tool's return code."""
    patched_run_command.side_effect = sh.ErrorReturnCode_1(
        sh.testinfra, b'', b'')
    with pytest.raises(SystemExit) as e:
        _instance.execute()
    assert 1 == e.value.code
| 1 | 7,812 | Can we get a test with multiple directories as per the initial example? | ansible-community-molecule | py |
@@ -317,8 +317,15 @@ def getStatusBar():
if not location:
return None
left, top, width, height = location
- bottom = top + height - 1
- obj = getDesktopObject().objectFromPoint(left, bottom)
+ x=1
+ while x<12:
+ bottom = top + height - x
+ if left<0:
+ left=0
+ obj = getDesktopObject().objectFromPoint(left, bottom)
+ if obj and obj.role == controlTypes.ROLE_STATUSBAR:
+ break
+ x=x+2
# We may have landed in a child of the status bar, so search the ancestry for a status bar.
while obj and not obj.role == controlTypes.ROLE_STATUSBAR: | 1 | #api.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2012 NVDA Contributors
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""General functions for NVDA"""
import config
import textInfos
import review
import globalVars
from logHandler import log
import ui
import treeInterceptorHandler
import virtualBuffers
import NVDAObjects
import NVDAObjects.IAccessible
import winUser
import controlTypes
import win32clipboard
import win32con
import eventHandler
import braille
import watchdog
import appModuleHandler
#User functions
def getFocusObject():
	"""Return the NVDA object that currently has the focus.
	@rtype: L{NVDAObjects.NVDAObject}
	"""
	return globalVars.focusObject
def getForegroundObject():
	"""Return the NVDA object representing the current foreground window.
	@rtype: L{NVDAObjects.NVDAObject}
	"""
	return globalVars.foregroundObject
def setForegroundObject(obj):
	"""Remember obj as the current foreground object (bookkeeping only:
	this does not change the operating system's foreground window).
	@param obj: the object to store as the foreground object
	@type obj: NVDAObjects.NVDAObject
	@return: True if stored, False if obj is not an NVDAObject.
	"""
	if not isinstance(obj,NVDAObjects.NVDAObject):
		return False
	globalVars.foregroundObject=obj
	return True
def setFocusObject(obj):
	"""Stores an object as the current focus object. (Note: this does not physically change the window with focus in the operating system, but allows NVDA to keep track of the correct object).
	Before overriding the last object, this function calls event_loseFocus on the object to notify it that it is loosing focus.
	It also rebuilds the focus ancestry, reusing the part shared with the previous focus, updates tree interceptors and app modules, and updates the global focus variables.
	@param obj: the object that will be stored as the focus object
	@type obj: NVDAObjects.NVDAObject
	@return: C{True} on success, C{False} if obj is not an NVDAObject.
	"""
	if not isinstance(obj,NVDAObjects.NVDAObject):
		return False
	if globalVars.focusObject:
		eventHandler.executeEvent("loseFocus",globalVars.focusObject)
	oldFocusLine=globalVars.focusAncestors
	#add the old focus to the old focus ancestors, but only if its not None (is none at NVDA initialization)
	if globalVars.focusObject:
		oldFocusLine.append(globalVars.focusObject)
	oldAppModules=[o.appModule for o in oldFocusLine if o and o.appModule]
	appModuleHandler.cleanup()
	ancestors=[]
	tempObj=obj
	matchedOld=False
	focusDifferenceLevel=0
	oldFocusLineLength=len(oldFocusLine)
	# Starting from the focus, move up the ancestor chain.
	safetyCount=0
	while tempObj:
		# Safety valve: an ancestry longer than 100 objects is assumed broken
		# (e.g. cyclic parents), so log and restart the walk from the desktop.
		if safetyCount<100:
			safetyCount+=1
		else:
			try:
				log.error("Never ending focus ancestry: last object: %s, %s, window class %s, application name %s"%(tempObj.name,controlTypes.roleLabels[tempObj.role],tempObj.windowClassName,tempObj.appModule.appName))
			except:
				pass
			tempObj=getDesktopObject()
		# Scan backwards through the old ancestors looking for a match.
		for index in xrange(oldFocusLineLength-1,-1,-1):
			watchdog.alive()
			if tempObj==oldFocusLine[index]:
				# Match! The old and new focus ancestors converge at this point.
				# Copy the old ancestors up to and including this object.
				origAncestors=oldFocusLine[0:index+1]
				#make sure to cache the last old ancestor as a parent on the first new ancestor so as not to leave a broken parent cache
				if ancestors and origAncestors:
					ancestors[0].container=origAncestors[-1]
				origAncestors.extend(ancestors)
				ancestors=origAncestors
				focusDifferenceLevel=index+1
				# We don't need to process any more in either this loop or the outer loop; we have all of the ancestors.
				matchedOld=True
				break
		if matchedOld:
			break
		# We're moving backwards along the ancestor chain, so add this to the start of the list.
		ancestors.insert(0,tempObj)
		container=tempObj.container
		tempObj.container=container # Cache the parent.
		tempObj=container
	newAppModules=[o.appModule for o in ancestors if o and o.appModule]
	#Remove the final new ancestor as this will be the new focus object
	del ancestors[-1]
	try:
		treeInterceptorHandler.cleanup()
	except watchdog.CallCancelled:
		# The watchdog aborted the cleanup; carry on with the focus change anyway.
		pass
	treeInterceptorObject=None
	o=None
	watchdog.alive()
	# Create/update tree interceptors for the new part of the ancestry plus the focus itself.
	for o in ancestors[focusDifferenceLevel:]+[obj]:
		try:
			treeInterceptorObject=treeInterceptorHandler.update(o)
		except:
			log.exception("Error updating tree interceptor")
	#Always make sure that the focus object's treeInterceptor is forced to either the found treeInterceptor (if its in it) or to None
	#This is to make sure that the treeInterceptor does not have to be looked up, which can cause problems for winInputHook
	if obj is o or obj in treeInterceptorObject:
		obj.treeInterceptor=treeInterceptorObject
	else:
		obj.treeInterceptor=None
	# #3804: handleAppSwitch should be called as late as possible,
	# as triggers must not be out of sync with global focus variables.
	# setFocusObject shouldn't fail earlier anyway, but it's best to be safe.
	appModuleHandler.handleAppSwitch(oldAppModules,newAppModules)
	# Set global focus variables.
	globalVars.focusDifferenceLevel=focusDifferenceLevel
	globalVars.focusObject=obj
	globalVars.focusAncestors=ancestors
	braille.invalidateCachedFocusAncestors(focusDifferenceLevel)
	if config.conf["reviewCursor"]["followFocus"]:
		setNavigatorObject(obj,isFocus=True)
	return True
def getFocusDifferenceLevel():
	"""Return the index in the focus ancestry where the last focus change diverged from the previous one."""
	return globalVars.focusDifferenceLevel
def getFocusAncestors():
	"""Return the list of container objects above the current focus object."""
	return globalVars.focusAncestors
def getMouseObject():
	"""Return the object that is directly under the mouse pointer."""
	return globalVars.mouseObject
def setMouseObject(obj):
	"""Remember obj as the object that is directly under the mouse pointer."""
	globalVars.mouseObject=obj
def getDesktopObject():
	"""Return the object representing the desktop (root of the object tree)."""
	return globalVars.desktopObject
def setDesktopObject(obj):
	"""Remember obj as the desktop (root) object."""
	globalVars.desktopObject=obj
def getReviewPosition():
	"""Return the TextInfo representing the user's review position.
	When unset, it is lazily created from the current navigator object for the active review mode.
	"""
	if not globalVars.reviewPosition:
		obj=globalVars.navigatorObject
		globalVars.reviewPosition,globalVars.reviewPositionObj=review.getPositionForCurrentMode(obj)
	return globalVars.reviewPosition
def setReviewPosition(reviewPosition,clearNavigatorObject=True):
	"""Sets a TextInfo instance as the review position.
	@param reviewPosition: the TextInfo to copy and store as the review position.
	@param clearNavigatorObject: if True, the current navigator object is set to None so that the next time it is asked for it is fetched from the review position.
	@type clearNavigatorObject: bool
	"""
	globalVars.reviewPosition=reviewPosition.copy()
	globalVars.reviewPositionObj=reviewPosition.obj
	if clearNavigatorObject: globalVars.navigatorObject=None
	# braille is imported at module level; the previous function-local
	# "import braille" here was redundant and has been removed.
	braille.handler.handleReviewMove()
def getNavigatorObject():
	"""Gets the current navigator object. Navigator objects can be used to navigate around the operating system (with the number pad) with out moving the focus. If the navigator object is not set, it fetches it from the review position.
	@returns: the current navigator object
	@rtype: L{NVDAObjects.NVDAObject}
	"""
	if globalVars.navigatorObject:
		return globalVars.navigatorObject
	else:
		if review.getCurrentMode()=='object':
			# In object review the review position is anchored directly on an object.
			obj=globalVars.reviewPosition.obj
		else:
			try:
				obj=globalVars.reviewPosition.NVDAObjectAtStart
			except (NotImplementedError,LookupError):
				# Some TextInfos cannot resolve an object at their start; fall back to the owner object.
				obj=globalVars.reviewPosition.obj
		# Prefer the root object when the review position lives inside a compound document.
		globalVars.navigatorObject=getattr(obj,'rootNVDAObject',None) or obj
		return globalVars.navigatorObject
def setNavigatorObject(obj,isFocus=False):
	"""Sets an object to be the current navigator object. Navigator objects can be used to navigate around the operating system (with the number pad) with out moving the focus. It also sets the current review position to None so that next time the review position is asked for, it is created from the navigator object.
	@param obj: the object that will be set as the current navigator object
	@type obj: NVDAObjects.NVDAObject
	@param isFocus: true if the navigator object was set due to a focus change.
	@type isFocus: bool
	@return: C{False} if obj is not an NVDAObject; otherwise C{None}.
	"""
	if not isinstance(obj,NVDAObjects.NVDAObject):
		return False
	globalVars.navigatorObject=obj
	# Invalidate the review position; it will be recreated on next access.
	# (The previously stored values were saved to unused locals here; removed.)
	globalVars.reviewPosition=None
	globalVars.reviewPositionObj=None
	reviewMode=review.getCurrentMode()
	# #3320: If in document review yet there is no document to review the mode should be forced to object.
	if reviewMode=='document' and (not isinstance(obj.treeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor) or not obj.treeInterceptor.isReady or obj.treeInterceptor.passThrough):
		review.setCurrentMode('object',False)
	elif isinstance(obj.treeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor) and obj.treeInterceptor.isReady and not obj.treeInterceptor.passThrough:
		if reviewMode=='object':
			# The object lives in a ready browse-mode document: switch to document review.
			review.setCurrentMode('document',False)
		if isFocus:
			# Focus moves anchor the review position at the document caret.
			globalVars.reviewPosition=obj.treeInterceptor.makeTextInfo(textInfos.POSITION_CARET)
			globalVars.reviewPositionObj=globalVars.reviewPosition
	eventHandler.executeEvent("becomeNavigatorObject",obj)
def isTypingProtected():
	"""Checks to see if key echo should be suppressed because the focus is currently on an object that has its protected state set.
	@returns: True if it should be suppressed, False otherwise.
	@rtype: boolean
	"""
	focusObject=getFocusObject()
	# Protected editable fields (e.g. password boxes) must not have typed characters echoed.
	# bool() guards against returning None when there is no focus object.
	return bool(focusObject and (controlTypes.STATE_PROTECTED in focusObject.states or focusObject.role==controlTypes.ROLE_PASSWORDEDIT))
def createStateList(states):
	"""Break the given bit mask down into a list of its set single-bit values (each 2 to the power of its position, positions 0-31)."""
	return [1<<bit for bit in xrange(32) if states&(1<<bit)]
def moveMouseToNVDAObject(obj):
	"""Move the mouse pointer to the centre of the given NVDA object's on-screen location."""
	location=obj.location
	if location and len(location)==4:
		left,top,width,height=location
		# Integer midpoint arithmetic kept as (l+l+w)/2 to match Python 2 floor division.
		winUser.setCursorPos((left+left+width)/2,(top+top+height)/2)
def processPendingEvents(processEventQueue=True):
	"""Pump pending wx, Java Access Bridge and IAccessible events so NVDA's state is current.
	@param processEventQueue: whether to also flush NVDA's internal event queue.
	@type processEventQueue: bool
	"""
	# Import late to avoid circular import.
	import IAccessibleHandler
	import JABHandler
	import wx
	import queueHandler
	watchdog.alive()
	wx.Yield()
	JABHandler.pumpAll()
	IAccessibleHandler.pumpAll()
	import baseObject
	# Pumped events may have changed object state; drop cached auto properties.
	baseObject.AutoPropertyObject.invalidateCaches()
	if processEventQueue:
		queueHandler.flushQueue(queueHandler.eventQueue)
def copyToClip(text):
	"""Copies the given text to the windows clipboard.
	@returns: True if it succeeds, False otherwise.
	@rtype: boolean
	@param text: the text which will be copied to the clipboard
	@type text: string
	"""
	# Only copy non-empty, non-whitespace text.
	if isinstance(text,basestring) and len(text)>0 and not text.isspace():
		try:
			win32clipboard.OpenClipboard()
		except win32clipboard.error:
			# Another application holds the clipboard open; give up rather than raise.
			return False
		try:
			win32clipboard.EmptyClipboard()
			win32clipboard.SetClipboardData(win32con.CF_UNICODETEXT, text)
		finally:
			win32clipboard.CloseClipboard()
		win32clipboard.OpenClipboard() # there seems to be a bug so to retrieve unicode text we have to reopen the clipboard
		try:
			got = win32clipboard.GetClipboardData(win32con.CF_UNICODETEXT)
		finally:
			win32clipboard.CloseClipboard()
		# Verify the round trip: succeed only if the clipboard now holds exactly the given text.
		if got == text:
			return True
	return False
def getClipData():
	"""Receives text from the windows clipboard.
	@returns: Clipboard text
	@rtype: string
	"""
	win32clipboard.OpenClipboard()
	try:
		return win32clipboard.GetClipboardData(win32con.CF_UNICODETEXT)
	finally:
		win32clipboard.CloseClipboard()
def getStatusBar():
	"""Obtain the status bar for the current foreground object.
	@return: The status bar object or C{None} if no status bar was found.
	@rtype: L{NVDAObjects.NVDAObject}
	"""
	# The status bar is usually at the bottom of the screen, so probe the
	# object at the bottom left of the foreground object in screen coordinates.
	foreground=getForegroundObject()
	location=foreground.location
	if not location:
		return None
	left,top,width,height=location
	obj=getDesktopObject().objectFromPoint(left,top+height-1)
	# We may have landed in a child of the status bar, so search the ancestry for a status bar.
	while obj and obj.role!=controlTypes.ROLE_STATUSBAR:
		obj=obj.parent
	return obj
def getStatusBarText(obj):
	"""Get the text from a status bar.
	This includes the name of the status bar and the names and values of all of its children.
	@param obj: The status bar.
	@type obj: L{NVDAObjects.NVDAObject}
	@return: The status bar text.
	@rtype: str
	"""
	chunks=[chunk for child in obj.children for chunk in (child.name,child.value)
		if chunk and isinstance(chunk,basestring) and not chunk.isspace()]
	prefix=obj.name+" " if obj.name else ""
	return prefix+" ".join(chunks)
def filterFileName(name):
	"""Replaces invalid characters in a given string to make a windows compatible file name.
	@param name: The file name to filter.
	@type name: str
	@returns: The filtered file name.
	@rtype: str
	"""
	invalidChars=':?*\|<>/"'
	return ''.join('_' if c in invalidChars else c for c in name)
def getCaretObject():
	"""Gets the object which contains the caret.
	This is normally the focus object.
	However, if the focus object has a tree interceptor which is not in focus mode,
	the tree interceptor will be returned.
	@return: The object containing the caret.
	@rtype: L{baseObject.ScriptableObject}
	"""
	obj=getFocusObject()
	interceptor=obj.treeInterceptor
	if isinstance(interceptor,treeInterceptorHandler.DocumentTreeInterceptor) and interceptor.isReady and not interceptor.passThrough:
		return interceptor
	return obj
| 1 | 17,530 | I assume this is because you were seeing cases where left was a negative number? When? | nvaccess-nvda | py |
@@ -11,10 +11,10 @@
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
-#include "fix_deposit.h"
-#include <mpi.h>
#include <cmath>
+#include <cstdlib>
#include <cstring>
+#include "fix_deposit.h"
#include "atom.h"
#include "atom_vec.h"
#include "molecule.h" | 1 | /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, [email protected]
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#include "fix_deposit.h"
#include <mpi.h>
#include <cmath>
#include <cstring>
#include "atom.h"
#include "atom_vec.h"
#include "molecule.h"
#include "force.h"
#include "update.h"
#include "modify.h"
#include "fix.h"
#include "comm.h"
#include "domain.h"
#include "lattice.h"
#include "region.h"
#include "random_park.h"
#include "math_extra.h"
#include "math_const.h"
#include "memory.h"
#include "error.h"
using namespace LAMMPS_NS;
using namespace FixConst;
using namespace MathConst;
// insertion mode: single atoms vs molecule-template insertions
enum{ATOM,MOLECULE};
// spatial distribution of candidate insertion points
enum{DIST_UNIFORM,DIST_GAUSSIAN};
#define EPSILON 1.0e6
/* ---------------------------------------------------------------------- */
// constructor: parse "fix ID group deposit ninsert type nfreq seed keyword ..."
// arguments, validate the insertion region and molecule templates, allocate
// per-insertion coordinate/image buffers, apply lattice scaling to all
// distance/velocity inputs, and schedule the first insertion on the next step

FixDeposit::FixDeposit(LAMMPS *lmp, int narg, char **arg) :
  Fix(lmp, narg, arg), idregion(NULL), idrigid(NULL),
  idshake(NULL), onemols(NULL), molfrac(NULL), coords(NULL), imageflags(NULL),
  fixrigid(NULL), fixshake(NULL), random(NULL)
{
  if (narg < 7) error->all(FLERR,"Illegal fix deposit command");
  restart_global = 1;
  time_depend = 1;
  // required args
  // ninsert = total # of insertions, nfreq = timestep period, seed > 0 for RNG
  ninsert = force->inumeric(FLERR,arg[3]);
  ntype = force->inumeric(FLERR,arg[4]);
  nfreq = force->inumeric(FLERR,arg[5]);
  seed = force->inumeric(FLERR,arg[6]);
  if (seed <= 0) error->all(FLERR,"Illegal fix deposit command");
  // read options from end of input line
  options(narg-7,&arg[7]);
  // error check on type
  if (mode == ATOM && (ntype <= 0 || ntype > atom->ntypes))
    error->all(FLERR,"Invalid atom type in fix deposit command");
  // error checks on region and its extent being inside simulation box
  if (iregion == -1) error->all(FLERR,"Must specify a region in fix deposit");
  if (domain->regions[iregion]->bboxflag == 0)
    error->all(FLERR,"Fix deposit region does not support a bounding box");
  if (domain->regions[iregion]->dynamic_check())
    error->all(FLERR,"Fix deposit region cannot be dynamic");
  xlo = domain->regions[iregion]->extent_xlo;
  xhi = domain->regions[iregion]->extent_xhi;
  ylo = domain->regions[iregion]->extent_ylo;
  yhi = domain->regions[iregion]->extent_yhi;
  zlo = domain->regions[iregion]->extent_zlo;
  zhi = domain->regions[iregion]->extent_zhi;
  if (domain->triclinic == 0) {
    if (xlo < domain->boxlo[0] || xhi > domain->boxhi[0] ||
        ylo < domain->boxlo[1] || yhi > domain->boxhi[1] ||
        zlo < domain->boxlo[2] || zhi > domain->boxhi[2])
      error->all(FLERR,"Deposition region extends outside simulation box");
  } else {
    // triclinic boxes are bounded by the bounding-box corners instead
    if (xlo < domain->boxlo_bound[0] || xhi > domain->boxhi_bound[0] ||
        ylo < domain->boxlo_bound[1] || yhi > domain->boxhi_bound[1] ||
        zlo < domain->boxlo_bound[2] || zhi > domain->boxhi_bound[2])
      error->all(FLERR,"Deposition region extends outside simulation box");
  }
  // error check and further setup for mode = MOLECULE
  if (atom->tag_enable == 0)
    error->all(FLERR,"Cannot use fix_deposit unless atoms have IDs");
  if (mode == MOLECULE) {
    for (int i = 0; i < nmol; i++) {
      if (onemols[i]->xflag == 0)
        error->all(FLERR,"Fix deposit molecule must have coordinates");
      if (onemols[i]->typeflag == 0)
        error->all(FLERR,"Fix deposit molecule must have atom types");
      if (ntype+onemols[i]->ntypes <= 0 ||
          ntype+onemols[i]->ntypes > atom->ntypes)
        error->all(FLERR,"Invalid atom type in fix deposit mol command");
      if (atom->molecular == 2 && onemols != atom->avec->onemols)
        error->all(FLERR,"Fix deposit molecule template ID must be same "
                   "as atom_style template ID");
      onemols[i]->check_attributes(0);
      // fix deposit uses geoemetric center of molecule for insertion
      onemols[i]->compute_center();
    }
  }
  if (rigidflag && mode == ATOM)
    error->all(FLERR,"Cannot use fix deposit rigid and not molecule");
  if (shakeflag && mode == ATOM)
    error->all(FLERR,"Cannot use fix deposit shake and not molecule");
  if (rigidflag && shakeflag)
    error->all(FLERR,"Cannot use fix deposit rigid and shake");
  // setup of coords and imageflags array
  // sized to the largest molecule so one buffer serves every template
  if (mode == ATOM) natom_max = 1;
  else {
    natom_max = 0;
    for (int i = 0; i < nmol; i++)
      natom_max = MAX(natom_max,onemols[i]->natoms);
  }
  memory->create(coords,natom_max,3,"deposit:coords");
  memory->create(imageflags,natom_max,"deposit:imageflags");
  // setup scaling
  double xscale,yscale,zscale;
  if (scaleflag) {
    xscale = domain->lattice->xlattice;
    yscale = domain->lattice->ylattice;
    zscale = domain->lattice->zlattice;
  }
  else xscale = yscale = zscale = 1.0;
  // apply scaling to all input parameters with dist/vel units
  // lo/hi/rate act along the "vertical" axis: y in 2d, z in 3d
  if (domain->dimension == 2) {
    lo *= yscale;
    hi *= yscale;
    rate *= yscale;
  } else {
    lo *= zscale;
    hi *= zscale;
    rate *= zscale;
  }
  deltasq *= xscale*xscale;
  nearsq *= xscale*xscale;
  vxlo *= xscale;
  vxhi *= xscale;
  vylo *= yscale;
  vyhi *= yscale;
  vzlo *= zscale;
  vzhi *= zscale;
  xmid *= xscale;
  ymid *= yscale;
  zmid *= zscale;
  sigma *= xscale; // same as in region sphere
  tx *= xscale;
  ty *= yscale;
  tz *= zscale;
  // find current max atom and molecule IDs if necessary
  if (idnext) find_maxid();
  // random number generator, same for all procs
  random = new RanPark(lmp,seed);
  // set up reneighboring
  force_reneighbor = 1;
  next_reneighbor = update->ntimestep + 1;
  nfirst = next_reneighbor;
  ninserted = 0;
}
/* ---------------------------------------------------------------------- */
FixDeposit::~FixDeposit()
{
  // release per-insertion buffers, option strings, and the RNG;
  // all members are independent, so release order is arbitrary
  memory->destroy(imageflags);
  memory->destroy(coords);
  delete [] idregion;
  delete [] idshake;
  delete [] idrigid;
  delete [] molfrac;
  delete random;
}
/* ---------------------------------------------------------------------- */
int FixDeposit::setmask()
{
  // this fix hooks only the pre-exchange stage of the timestep
  return PRE_EXCHANGE;
}
/* ---------------------------------------------------------------------- */
// re-resolve the region and companion fixes at every run setup, and warn if
// the "near" exclusion distance may be too small for finite-size particles

void FixDeposit::init()
{
  // set index and check validity of region
  iregion = domain->find_region(idregion);
  if (iregion == -1)
    error->all(FLERR,"Region ID for fix deposit does not exist");
  // if rigidflag defined, check for rigid/small fix
  // its molecule template must be same as this one
  fixrigid = NULL;
  if (rigidflag) {
    int ifix = modify->find_fix(idrigid);
    if (ifix < 0) error->all(FLERR,"Fix deposit rigid fix does not exist");
    fixrigid = modify->fix[ifix];
    int tmp;
    if (onemols != (Molecule **) fixrigid->extract("onemol",tmp))
      error->all(FLERR,
                 "Fix deposit and fix rigid/small not using "
                 "same molecule template ID");
  }
  // if shakeflag defined, check for SHAKE fix
  // its molecule template must be same as this one
  fixshake = NULL;
  if (shakeflag) {
    int ifix = modify->find_fix(idshake);
    if (ifix < 0) error->all(FLERR,"Fix deposit shake fix does not exist");
    fixshake = modify->fix[ifix];
    int tmp;
    if (onemols != (Molecule **) fixshake->extract("onemol",tmp))
      error->all(FLERR,"Fix deposit and fix shake not using "
                 "same molecule template ID");
  }
  // for finite size spherical particles:
  // warn if near < 2 * maxrad of existing and inserted particles
  // since may lead to overlaps
  // if inserted molecule does not define diameters,
  // use AtomVecSphere::create_atom() default radius = 0.5
  if (atom->radius_flag) {
    double *radius = atom->radius;
    int nlocal = atom->nlocal;
    double maxrad = 0.0;
    for (int i = 0; i < nlocal; i++)
      maxrad = MAX(maxrad,radius[i]);
    double maxradall;
    MPI_Allreduce(&maxrad,&maxradall,1,MPI_DOUBLE,MPI_MAX,world);
    double maxradinsert = 0.0;
    if (mode == MOLECULE) {
      for (int i = 0; i < nmol; i++) {
        if (onemols[i]->radiusflag)
          maxradinsert = MAX(maxradinsert,onemols[i]->maxradius);
        else maxradinsert = MAX(maxradinsert,0.5);
      }
    } else maxradinsert = 0.5;
    // smallest center-to-center distance that guarantees no overlap
    double separation = MAX(2.0*maxradinsert,maxradall+maxradinsert);
    if (sqrt(nearsq) < separation && comm->me == 0) {
      char str[128];
      sprintf(str,"Fix deposit near setting < possible overlap separation %g",
              separation);
      error->warning(FLERR,str);
    }
  }
}
/* ----------------------------------------------------------------------
perform particle insertion
------------------------------------------------------------------------- */
void FixDeposit::pre_exchange()
{
int i,m,n,nlocalprev,imol,natom,flag,flagall;
double coord[3],lamda[3],delx,dely,delz,rsq;
double r[3],vnew[3],rotmat[3][3],quat[4];
double *newcoord;
// just return if should not be called on this timestep
if (next_reneighbor != update->ntimestep) return;
// clear ghost count and any ghost bonus data internal to AtomVec
// same logic as beginning of Comm::exchange()
// do it now b/c inserting atoms will overwrite ghost atoms
atom->nghost = 0;
atom->avec->clear_bonus();
// compute current offset = bottom of insertion volume
double offset = 0.0;
if (rateflag) offset = (update->ntimestep - nfirst) * update->dt * rate;
double *sublo,*subhi;
if (domain->triclinic == 0) {
sublo = domain->sublo;
subhi = domain->subhi;
} else {
sublo = domain->sublo_lamda;
subhi = domain->subhi_lamda;
}
// find current max atom and molecule IDs if necessary
if (!idnext) find_maxid();
// attempt an insertion until successful
int dimension = domain->dimension;
int success = 0;
int attempt = 0;
while (attempt < maxattempt) {
attempt++;
// choose random position for new particle within region
if (distflag == DIST_UNIFORM) {
do {
coord[0] = xlo + random->uniform() * (xhi-xlo);
coord[1] = ylo + random->uniform() * (yhi-ylo);
coord[2] = zlo + random->uniform() * (zhi-zlo);
} while (domain->regions[iregion]->match(coord[0],coord[1],coord[2]) == 0);
} else if (distflag == DIST_GAUSSIAN) {
do {
coord[0] = xmid + random->gaussian() * sigma;
coord[1] = ymid + random->gaussian() * sigma;
coord[2] = zmid + random->gaussian() * sigma;
} while (domain->regions[iregion]->match(coord[0],coord[1],coord[2]) == 0);
} else error->all(FLERR,"Unknown particle distribution in fix deposit");
// adjust vertical coord by offset
if (dimension == 2) coord[1] += offset;
else coord[2] += offset;
// if global, reset vertical coord to be lo-hi above highest atom
// if local, reset vertical coord to be lo-hi above highest "nearby" atom
// local computation computes lateral distance between 2 particles w/ PBC
// when done, have final coord of atom or center pt of molecule
if (globalflag || localflag) {
int dim;
double max,maxall,delx,dely,delz,rsq;
if (dimension == 2) {
dim = 1;
max = domain->boxlo[1];
} else {
dim = 2;
max = domain->boxlo[2];
}
double **x = atom->x;
int nlocal = atom->nlocal;
for (i = 0; i < nlocal; i++) {
if (localflag) {
delx = coord[0] - x[i][0];
dely = coord[1] - x[i][1];
delz = 0.0;
domain->minimum_image(delx,dely,delz);
if (dimension == 2) rsq = delx*delx;
else rsq = delx*delx + dely*dely;
if (rsq > deltasq) continue;
}
if (x[i][dim] > max) max = x[i][dim];
}
MPI_Allreduce(&max,&maxall,1,MPI_DOUBLE,MPI_MAX,world);
if (dimension == 2)
coord[1] = maxall + lo + random->uniform()*(hi-lo);
else
coord[2] = maxall + lo + random->uniform()*(hi-lo);
}
// coords = coords of all atoms
// for molecule, perform random rotation around center pt
// apply PBC so final coords are inside box
// also modify image flags due to PBC
if (mode == ATOM) {
natom = 1;
coords[0][0] = coord[0];
coords[0][1] = coord[1];
coords[0][2] = coord[2];
imageflags[0] = ((imageint) IMGMAX << IMG2BITS) |
((imageint) IMGMAX << IMGBITS) | IMGMAX;
} else {
double rng = random->uniform();
imol = 0;
while (rng > molfrac[imol]) imol++;
natom = onemols[imol]->natoms;
if (dimension == 3) {
r[0] = random->uniform() - 0.5;
r[1] = random->uniform() - 0.5;
r[2] = random->uniform() - 0.5;
} else {
r[0] = r[1] = 0.0;
r[2] = 1.0;
}
double theta = random->uniform() * MY_2PI;
MathExtra::norm3(r);
MathExtra::axisangle_to_quat(r,theta,quat);
MathExtra::quat_to_mat(quat,rotmat);
for (i = 0; i < natom; i++) {
MathExtra::matvec(rotmat,onemols[imol]->dx[i],coords[i]);
coords[i][0] += coord[0];
coords[i][1] += coord[1];
coords[i][2] += coord[2];
imageflags[i] = ((imageint) IMGMAX << IMG2BITS) |
((imageint) IMGMAX << IMGBITS) | IMGMAX;
domain->remap(coords[i],imageflags[i]);
}
}
// check distance between any existing atom and any inserted atom
// if less than near, try again
// use minimum_image() to account for PBC
double **x = atom->x;
int nlocal = atom->nlocal;
flag = 0;
for (m = 0; m < natom; m++) {
for (i = 0; i < nlocal; i++) {
delx = coords[m][0] - x[i][0];
dely = coords[m][1] - x[i][1];
delz = coords[m][2] - x[i][2];
domain->minimum_image(delx,dely,delz);
rsq = delx*delx + dely*dely + delz*delz;
if (rsq < nearsq) flag = 1;
}
}
MPI_Allreduce(&flag,&flagall,1,MPI_INT,MPI_MAX,world);
if (flagall) continue;
// proceed with insertion
nlocalprev = atom->nlocal;
// choose random velocity for new particle
// used for every atom in molecule
vnew[0] = vxlo + random->uniform() * (vxhi-vxlo);
vnew[1] = vylo + random->uniform() * (vyhi-vylo);
vnew[2] = vzlo + random->uniform() * (vzhi-vzlo);
// if target specified, change velocity vector accordingly
if (targetflag) {
double vel = sqrt(vnew[0]*vnew[0] + vnew[1]*vnew[1] + vnew[2]*vnew[2]);
delx = tx - coord[0];
dely = ty - coord[1];
delz = tz - coord[2];
double rsq = delx*delx + dely*dely + delz*delz;
if (rsq > 0.0) {
double rinv = sqrt(1.0/rsq);
vnew[0] = delx*rinv*vel;
vnew[1] = dely*rinv*vel;
vnew[2] = delz*rinv*vel;
}
}
// check if new atoms are in my sub-box or above it if I am highest proc
// if so, add atom to my list via create_atom()
// initialize additional info about the atoms
// set group mask to "all" plus fix group
for (m = 0; m < natom; m++) {
if (domain->triclinic) {
domain->x2lamda(coords[m],lamda);
newcoord = lamda;
} else newcoord = coords[m];
flag = 0;
if (newcoord[0] >= sublo[0] && newcoord[0] < subhi[0] &&
newcoord[1] >= sublo[1] && newcoord[1] < subhi[1] &&
newcoord[2] >= sublo[2] && newcoord[2] < subhi[2]) flag = 1;
else if (dimension == 3 && newcoord[2] >= domain->boxhi[2]) {
if (comm->layout != Comm::LAYOUT_TILED) {
if (comm->myloc[2] == comm->procgrid[2]-1 &&
newcoord[0] >= sublo[0] && newcoord[0] < subhi[0] &&
newcoord[1] >= sublo[1] && newcoord[1] < subhi[1]) flag = 1;
} else {
if (comm->mysplit[2][1] == 1.0 &&
newcoord[0] >= sublo[0] && newcoord[0] < subhi[0] &&
newcoord[1] >= sublo[1] && newcoord[1] < subhi[1]) flag = 1;
}
} else if (dimension == 2 && newcoord[1] >= domain->boxhi[1]) {
if (comm->layout != Comm::LAYOUT_TILED) {
if (comm->myloc[1] == comm->procgrid[1]-1 &&
newcoord[0] >= sublo[0] && newcoord[0] < subhi[0]) flag = 1;
} else {
if (comm->mysplit[1][1] == 1.0 &&
newcoord[0] >= sublo[0] && newcoord[0] < subhi[0]) flag = 1;
}
}
if (flag) {
if (mode == ATOM) atom->avec->create_atom(ntype,coords[m]);
else atom->avec->create_atom(ntype+onemols[imol]->type[m],coords[m]);
n = atom->nlocal - 1;
atom->tag[n] = maxtag_all + m+1;
if (mode == MOLECULE) {
if (atom->molecule_flag) atom->molecule[n] = maxmol_all+1;
if (atom->molecular == 2) {
atom->molindex[n] = 0;
atom->molatom[n] = m;
}
}
atom->mask[n] = 1 | groupbit;
atom->image[n] = imageflags[m];
atom->v[n][0] = vnew[0];
atom->v[n][1] = vnew[1];
atom->v[n][2] = vnew[2];
if (mode == MOLECULE) {
onemols[imol]->quat_external = quat;
atom->add_molecule_atom(onemols[imol],m,n,maxtag_all);
}
modify->create_attribute(n);
}
}
// FixRigidSmall::set_molecule stores rigid body attributes
// coord is new position of geometric center of mol, not COM
// FixShake::set_molecule stores shake info for molecule
if (rigidflag)
fixrigid->set_molecule(nlocalprev,maxtag_all,imol,coord,vnew,quat);
else if (shakeflag)
fixshake->set_molecule(nlocalprev,maxtag_all,imol,coord,vnew,quat);
// old code: unsuccessful if no proc performed insertion of an atom
// don't think that check is necessary
// if get this far, should always be succesful
// would be hard to undo partial insertion for a molecule
// better to check how many atoms could be inserted (w/out inserting)
// then sum to insure all are inserted, before doing actual insertion
// MPI_Allreduce(&flag,&success,1,MPI_INT,MPI_MAX,world);
success = 1;
break;
}
// warn if not successful b/c too many attempts
if (!success && comm->me == 0)
error->warning(FLERR,"Particle deposition was unsuccessful",0);
// reset global natoms,nbonds,etc
// increment maxtag_all and maxmol_all if necessary
// if global map exists, reset it now instead of waiting for comm
// since other pre-exchange fixes may use it
// invoke map_init() b/c atom count has grown
if (success) {
atom->natoms += natom;
if (atom->natoms < 0)
error->all(FLERR,"Too many total atoms");
if (mode == MOLECULE) {
atom->nbonds += onemols[imol]->nbonds;
atom->nangles += onemols[imol]->nangles;
atom->ndihedrals += onemols[imol]->ndihedrals;
atom->nimpropers += onemols[imol]->nimpropers;
}
maxtag_all += natom;
if (maxtag_all >= MAXTAGINT)
error->all(FLERR,"New atom IDs exceed maximum allowed ID");
if (mode == MOLECULE && atom->molecule_flag) maxmol_all++;
if (atom->map_style) {
atom->map_init();
atom->map_set();
}
}
// next timestep to insert
// next_reneighbor = 0 if done
if (success) ninserted++;
if (ninserted < ninsert) next_reneighbor += nfreq;
else next_reneighbor = 0;
}
/* ----------------------------------------------------------------------
maxtag_all = current max atom ID for all atoms
maxmol_all = current max molecule ID for all atoms
------------------------------------------------------------------------- */
void FixDeposit::find_maxid()
{
tagint *tag = atom->tag;
tagint *molecule = atom->molecule;
int nlocal = atom->nlocal;
tagint max = 0;
for (int i = 0; i < nlocal; i++) max = MAX(max,tag[i]);
MPI_Allreduce(&max,&maxtag_all,1,MPI_LMP_TAGINT,MPI_MAX,world);
if (mode == MOLECULE && molecule) {
max = 0;
for (int i = 0; i < nlocal; i++) max = MAX(max,molecule[i]);
MPI_Allreduce(&max,&maxmol_all,1,MPI_LMP_TAGINT,MPI_MAX,world);
}
}
/* ----------------------------------------------------------------------
   parse optional parameters at end of input line
------------------------------------------------------------------------- */

void FixDeposit::options(int narg, char **arg)
{
  // defaults

  iregion = -1;
  idregion = NULL;
  mode = ATOM;
  molfrac = NULL;
  rigidflag = 0;
  idrigid = NULL;
  shakeflag = 0;
  idshake = NULL;
  idnext = 0;
  globalflag = localflag = 0;
  lo = hi = deltasq = 0.0;
  nearsq = 0.0;
  maxattempt = 10;
  rateflag = 0;
  vxlo = vxhi = vylo = vyhi = vzlo = vzhi = 0.0;
  distflag = DIST_UNIFORM;
  sigma = 1.0;
  xmid = ymid = zmid = 0.0;
  scaleflag = 1;
  targetflag = 0;

  int iarg = 0;
  while (iarg < narg) {
    if (strcmp(arg[iarg],"region") == 0) {
      if (iarg+2 > narg) error->all(FLERR,"Illegal fix deposit command");
      iregion = domain->find_region(arg[iarg+1]);
      if (iregion == -1)
        error->all(FLERR,"Region ID for fix deposit does not exist");
      int n = strlen(arg[iarg+1]) + 1;
      // free any previous ID so a repeated "region" keyword does not leak,
      // consistent with the "rigid" and "shake" branches below
      delete [] idregion;
      idregion = new char[n];
      strcpy(idregion,arg[iarg+1]);
      iarg += 2;
    } else if (strcmp(arg[iarg],"mol") == 0) {
      if (iarg+2 > narg) error->all(FLERR,"Illegal fix deposit command");
      int imol = atom->find_molecule(arg[iarg+1]);
      if (imol == -1)
        error->all(FLERR,"Molecule template ID for fix deposit does not exist");
      mode = MOLECULE;
      onemols = &atom->molecules[imol];
      nmol = onemols[0]->nset;
      // default molfrac = cumulative distribution with equal weight per mol,
      // last entry forced to exactly 1.0 to avoid round-off issues
      delete [] molfrac;
      molfrac = new double[nmol];
      molfrac[0] = 1.0/nmol;
      for (int i = 1; i < nmol-1; i++) molfrac[i] = molfrac[i-1] + 1.0/nmol;
      molfrac[nmol-1] = 1.0;
      iarg += 2;
    } else if (strcmp(arg[iarg],"molfrac") == 0) {
      // user-specified fractions, stored as a cumulative distribution
      // which must sum to 1.0 within EPSILON
      if (mode != MOLECULE) error->all(FLERR,"Illegal fix deposit command");
      if (iarg+nmol+1 > narg) error->all(FLERR,"Illegal fix deposit command");
      molfrac[0] = force->numeric(FLERR,arg[iarg+1]);
      for (int i = 1; i < nmol; i++)
        molfrac[i] = molfrac[i-1] + force->numeric(FLERR,arg[iarg+i+1]);
      if (molfrac[nmol-1] < 1.0-EPSILON || molfrac[nmol-1] > 1.0+EPSILON)
        error->all(FLERR,"Illegal fix deposit command");
      molfrac[nmol-1] = 1.0;
      iarg += nmol+1;
    } else if (strcmp(arg[iarg],"rigid") == 0) {
      if (iarg+2 > narg) error->all(FLERR,"Illegal fix deposit command");
      int n = strlen(arg[iarg+1]) + 1;
      delete [] idrigid;
      idrigid = new char[n];
      strcpy(idrigid,arg[iarg+1]);
      rigidflag = 1;
      iarg += 2;
    } else if (strcmp(arg[iarg],"shake") == 0) {
      if (iarg+2 > narg) error->all(FLERR,"Illegal fix deposit command");
      int n = strlen(arg[iarg+1]) + 1;
      delete [] idshake;
      idshake = new char[n];
      strcpy(idshake,arg[iarg+1]);
      shakeflag = 1;
      iarg += 2;
    } else if (strcmp(arg[iarg],"id") == 0) {
      // "max" = compute IDs from current max, "next" = use next available IDs
      if (iarg+2 > narg) error->all(FLERR,"Illegal fix deposit command");
      if (strcmp(arg[iarg+1],"max") == 0) idnext = 0;
      else if (strcmp(arg[iarg+1],"next") == 0) idnext = 1;
      else error->all(FLERR,"Illegal fix deposit command");
      iarg += 2;
    } else if (strcmp(arg[iarg],"global") == 0) {
      // global and local insertion-height modes are mutually exclusive
      if (iarg+3 > narg) error->all(FLERR,"Illegal fix deposit command");
      globalflag = 1;
      localflag = 0;
      lo = force->numeric(FLERR,arg[iarg+1]);
      hi = force->numeric(FLERR,arg[iarg+2]);
      iarg += 3;
    } else if (strcmp(arg[iarg],"local") == 0) {
      if (iarg+4 > narg) error->all(FLERR,"Illegal fix deposit command");
      localflag = 1;
      globalflag = 0;
      lo = force->numeric(FLERR,arg[iarg+1]);
      hi = force->numeric(FLERR,arg[iarg+2]);
      // stored squared to avoid sqrt in the per-atom distance test
      deltasq = force->numeric(FLERR,arg[iarg+3]) *
        force->numeric(FLERR,arg[iarg+3]);
      iarg += 4;
    } else if (strcmp(arg[iarg],"near") == 0) {
      if (iarg+2 > narg) error->all(FLERR,"Illegal fix deposit command");
      // stored squared for the same reason as deltasq
      nearsq = force->numeric(FLERR,arg[iarg+1]) *
        force->numeric(FLERR,arg[iarg+1]);
      iarg += 2;
    } else if (strcmp(arg[iarg],"attempt") == 0) {
      if (iarg+2 > narg) error->all(FLERR,"Illegal fix deposit command");
      maxattempt = force->inumeric(FLERR,arg[iarg+1]);
      iarg += 2;
    } else if (strcmp(arg[iarg],"rate") == 0) {
      if (iarg+2 > narg) error->all(FLERR,"Illegal fix deposit command");
      rateflag = 1;
      rate = force->numeric(FLERR,arg[iarg+1]);
      iarg += 2;
    } else if (strcmp(arg[iarg],"vx") == 0) {
      if (iarg+3 > narg) error->all(FLERR,"Illegal fix deposit command");
      vxlo = force->numeric(FLERR,arg[iarg+1]);
      vxhi = force->numeric(FLERR,arg[iarg+2]);
      iarg += 3;
    } else if (strcmp(arg[iarg],"vy") == 0) {
      if (iarg+3 > narg) error->all(FLERR,"Illegal fix deposit command");
      vylo = force->numeric(FLERR,arg[iarg+1]);
      vyhi = force->numeric(FLERR,arg[iarg+2]);
      iarg += 3;
    } else if (strcmp(arg[iarg],"vz") == 0) {
      if (iarg+3 > narg) error->all(FLERR,"Illegal fix deposit command");
      vzlo = force->numeric(FLERR,arg[iarg+1]);
      vzhi = force->numeric(FLERR,arg[iarg+2]);
      iarg += 3;
    } else if (strcmp(arg[iarg],"units") == 0) {
      if (iarg+2 > narg) error->all(FLERR,"Illegal fix deposit command");
      if (strcmp(arg[iarg+1],"box") == 0) scaleflag = 0;
      else if (strcmp(arg[iarg+1],"lattice") == 0) scaleflag = 1;
      else error->all(FLERR,"Illegal fix deposit command");
      iarg += 2;
    } else if (strcmp(arg[iarg],"gaussian") == 0) {
      if (iarg+5 > narg) error->all(FLERR,"Illegal fix deposit command");
      xmid = force->numeric(FLERR,arg[iarg+1]);
      ymid = force->numeric(FLERR,arg[iarg+2]);
      zmid = force->numeric(FLERR,arg[iarg+3]);
      sigma = force->numeric(FLERR,arg[iarg+4]);
      distflag = DIST_GAUSSIAN;
      iarg += 5;
    } else if (strcmp(arg[iarg],"target") == 0) {
      if (iarg+4 > narg) error->all(FLERR,"Illegal fix deposit command");
      tx = force->numeric(FLERR,arg[iarg+1]);
      ty = force->numeric(FLERR,arg[iarg+2]);
      tz = force->numeric(FLERR,arg[iarg+3]);
      targetflag = 1;
      iarg += 4;
    } else error->all(FLERR,"Illegal fix deposit command");
  }
}
/* ----------------------------------------------------------------------
   pack entire state of Fix into one write
------------------------------------------------------------------------- */

void FixDeposit::write_restart(FILE *fp)
{
  // pack RNG state, insertion progress, and timestep bookkeeping
  // bigint values are stored bitwise inside doubles via ubuf

  double list[5];
  int count = 0;
  list[count++] = random->state();
  list[count++] = ninserted;
  list[count++] = nfirst;
  list[count++] = ubuf(next_reneighbor).d;
  list[count++] = ubuf(update->ntimestep).d;

  // only rank 0 performs the actual file I/O

  if (comm->me == 0) {
    int size = count * sizeof(double);
    fwrite(&size,sizeof(int),1,fp);
    fwrite(list,sizeof(double),count,fp);
  }
}
/* ----------------------------------------------------------------------
   use state info from restart file to restart the Fix
------------------------------------------------------------------------- */

void FixDeposit::restart(char *buf)
{
  // unpack in the same order write_restart() packed:
  // RNG state, ninserted, nfirst, next_reneighbor, timestep

  double *list = (double *) buf;
  int idx = 0;

  seed = static_cast<int> (list[idx++]);
  ninserted = static_cast<int> (list[idx++]);
  nfirst = static_cast<int> (list[idx++]);
  next_reneighbor = (bigint) ubuf(list[idx++]).i;

  // the fix cannot resume correctly if the timestep was reset

  bigint ntimestep_restart = (bigint) ubuf(list[idx++]).i;
  if (ntimestep_restart != update->ntimestep)
    error->all(FLERR,"Must not reset timestep when restarting this fix");

  random->reset(seed);
}
/* ----------------------------------------------------------------------
   extract particle radius for atom type = itype
   returns pointer to oneradius, or NULL if str is not recognized
   itype is reset to 0 on success (caller convention)
------------------------------------------------------------------------- */

void *FixDeposit::extract(const char *str, int &itype)
{
  if (strcmp(str,"radius") == 0) {
    if (mode == ATOM) {
      if (itype == ntype) oneradius = 0.5;
      else oneradius = 0.0;

    } else {

      // loop over onemols molecules
      // skip a molecule with no atoms as large as itype

      oneradius = 0.0;
      for (int i = 0; i < nmol; i++) {
        if (itype > ntype+onemols[i]->ntypes) continue;
        double *radius = onemols[i]->radius;
        int *type = onemols[i]->type;
        int natoms = onemols[i]->natoms;

        // check radii of atoms in Molecule with matching types
        // default to 0.5, if radii not defined in Molecule
        // same as atom->avec->create_atom(), invoked in pre_exchange()
        // NB: inner index renamed from "i" to avoid shadowing the
        //     molecule-loop variable above

        for (int iatom = 0; iatom < natoms; iatom++)
          if (type[iatom]+ntype == itype) {
            if (radius) oneradius = MAX(oneradius,radius[iatom]);
            else oneradius = MAX(oneradius,0.5);
          }
      }
    }
    itype = 0;
    return &oneradius;
  }

  return NULL;
}
| 1 | 27,002 | this is undoing recent changes for increased consistency of include file statements. please restore to the original. | lammps-lammps | cpp |
@@ -24,12 +24,13 @@ public class BaselineErrorProneExtension {
private static final ImmutableList<String> DEFAULT_PATCH_CHECKS = ImmutableList.of(
// Baseline checks
+ "LambdaMethodReference",
+ "OptionalOrElseMethodInvocation",
"PreferBuiltInConcurrentKeySet",
"PreferCollectionTransform",
"PreferListsPartition",
"PreferSafeLoggableExceptions",
"PreferSafeLoggingPreconditions",
- "OptionalOrElseMethodInvocation",
// Built-in checks
"ArrayEquals", | 1 | /*
* (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.extensions;
import com.google.common.collect.ImmutableList;
import org.gradle.api.Project;
import org.gradle.api.provider.ListProperty;
public class BaselineErrorProneExtension {
private static final ImmutableList<String> DEFAULT_PATCH_CHECKS = ImmutableList.of(
// Baseline checks
"PreferBuiltInConcurrentKeySet",
"PreferCollectionTransform",
"PreferListsPartition",
"PreferSafeLoggableExceptions",
"PreferSafeLoggingPreconditions",
"OptionalOrElseMethodInvocation",
// Built-in checks
"ArrayEquals",
"MissingOverride");
private final ListProperty<String> patchChecks;
public BaselineErrorProneExtension(Project project) {
patchChecks = project.getObjects().listProperty(String.class);
patchChecks.set(DEFAULT_PATCH_CHECKS);
}
public final ListProperty<String> getPatchChecks() {
return patchChecks;
}
}
| 1 | 7,271 | If these are applied in order, should `LambdaMethoReference` come after `OptionalOrElseMethodInvocation` | palantir-gradle-baseline | java |
@@ -42,7 +42,7 @@ namespace Microsoft.CodeAnalysis.Sarif
/// A dictionary whose keys are the strings representing the locations of scanned files
/// and whose values provide information about those files.
/// </param>
- void WriteFiles(IDictionary<string, IList<FileData>> fileDictionary);
+ void WriteFiles(IDictionary<string, FileData> filesDictionary);
/// <summary>
/// Write information about the logical locations where results were produced to | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System.Collections.Generic;
namespace Microsoft.CodeAnalysis.Sarif
{
/// <summary>This interface serves as a sink for <see cref="SarifLog"/> format issues.</summary>
public interface IResultLogWriter
{
/// <summary>
/// Initialize the current output log.
/// </summary>
/// <param name="id">A string that uniquely identifies a run.</param>
/// <param name="correlationId">A global identifier for a run that permits correlation with a larger automation process.</param>
void Initialize(string id, string correlationId);
/// <summary>Writes tool information to the log.</summary>
/// <exception cref="IOException">A file IO error occured. Clients implementing
/// <see cref="IToolFileConverter"/> should allow these exceptions to propagate.</exception>
/// <exception cref="InvalidOperationException">Thrown if the tool info block has already been
/// written.</exception>
/// <exception cref="ArgumentNullException">Thrown if <paramref name="info"/> is null.</exception>
/// <param name="tool">The tool information to write.</param>
void WriteTool(Tool tool);
/// <summary>Writes run information to the log. This object may appear after
/// the results, as it can contain data that can't be computed (such as the run
/// end time) until all results have been generated.</summary>
/// <exception cref="IOException">A file IO error occured. Clients implementing
/// <see cref="IToolFileConverter"/> should allow these exceptions to propagate.</exception>
/// <exception cref="InvalidOperationException">Thrown if the object has already been
/// written.</exception>
void WriteInvocation(Invocation invocation);
/// <summary>
/// Write information about scanned files to the log. This information may appear
/// after the results, as the full list of scanned files might not be known until
/// all results have been generated.
/// </summary>
/// <param name="fileDictionary">
/// A dictionary whose keys are the strings representing the locations of scanned files
/// and whose values provide information about those files.
/// </param>
void WriteFiles(IDictionary<string, IList<FileData>> fileDictionary);
/// <summary>
/// Write information about the logical locations where results were produced to
/// the log. This information may appear after the results, as the full list of
/// logical locations will not be known until all results have been generated.
/// </summary>
/// <param name="logicalLocationDictionary">
/// A dictionary whose keys are strings specifying a logical location and
/// whose values provide information about each component of the logical location.
/// </param>
void WriteLogicalLocations(IDictionary<string, LogicalLocation> logicalLocationsDictionary);
/// <summary>
/// Write information about rules to the log. This information may appear
/// after the results, as the relevant set of rules might not be known until
/// all results have been generated. A Sarif file may also contain only rules
/// metadata.
/// </summary>
/// <param name="fileDictionary">
/// A dictionary whose keys are the URIs of scanned files and whose values provide
/// information about those files.
/// </param>
void WriteRules(IDictionary<string, IRule> rules);
/// <summary>
/// Initialize the results array associated with the current output log. SARIF producers that
/// are explicitly generating results (as opposed to other SARIF scenarios such as publishing
/// rules metadata) should proactively call this method in order to ensure that an explicit
/// (but empty) results array exists in the log when no literal results were produced.
/// </summary>
void OpenResults();
/// <summary>
/// Writes a result to the log.
/// </summary>
/// <remarks>
/// This function makes a copy of the data stored in <paramref name="result"/>; if a
/// client wishes to reuse the result instance to avoid allocations they can do so. (This function
/// may invoke an internal copy of the result or serialize it in place to disk, etc.)
/// </remarks>
/// <exception cref="IOException">
/// A file IO error occured. Clients implementing
/// <see cref="IToolFileConverter"/> should allow these exceptions to propagate.
/// </exception>
/// <exception cref="InvalidOperationException">
/// Thrown if the tool info is not yet written.
/// </exception>
/// <exception cref="ArgumentNullException">
/// Thrown if <paramref name="result"/> is null.
/// </exception>
/// <param name="result">
/// The result to write.
/// </param>
void WriteResult(Result result);
/// <summary>
/// Close out the results array
/// </summary>
void CloseResults();
/// <summary>
/// Writes a set of results to the log.
/// </summary>
/// <remarks>
/// This function makes a copy of the data stored in <paramref name="results"/>; if a
/// client wishes to reuse the result instance to avoid allocations they can do so. (This function
/// may invoke an internal copy of the result or serialize it in place to disk, etc.)
/// </remarks>
/// <exception cref="IOException">
/// A file IO error occured. Clients implementing
/// <see cref="IToolFileConverter"/> should allow these exceptions to propagate.
/// </exception>
/// <exception cref="InvalidOperationException">
/// Thrown if the tool info is not yet written.
/// </exception>
/// <exception cref="ArgumentNullException">
/// Thrown if <paramref name="result"/> is null.
/// </exception>
/// <param name="results">
/// The results to write.
/// </param>
void WriteResults(IEnumerable<Result> results);
/// <summary>
/// Write a set of notifications relevant to the operation of the tool to the log.
/// </summary>
/// <param name="notifications">
/// The notifications to write.
/// </param>
void WriteToolNotifications(IEnumerable<Notification> notifications);
/// <summary>
/// Write a set of notifications relevant to the configuration of the tool to the log.
/// </summary>
/// <param name="notifications">
/// The notifications to write.
/// </param>
void WriteConfigurationNotifications(IEnumerable<Notification> notifications);
}
}
| 1 | 10,856 | Why this rename? | microsoft-sarif-sdk | .cs |
@@ -149,7 +149,7 @@ def _patch_sysmodules():
try:
yield
finally:
- if mock_main:
+ if mock_main and sys.version_info < (3, 3):
sys.modules.pop("__main__")
| 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2015 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2008 Fabrice Douchant <[email protected]>
# Copyright (c) 2009 Vincent
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2011-2014 Google, Inc.
# Copyright (c) 2012 David Pursehouse <[email protected]>
# Copyright (c) 2012 Kevin Jing Qiu <[email protected]>
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2012 JT Olds <[email protected]>
# Copyright (c) 2014-2018 Claudiu Popa <[email protected]>
# Copyright (c) 2014-2015 Michal Nowikowski <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Alexandru Coman <[email protected]>
# Copyright (c) 2014 Daniel Harding <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2014 Dan Goldsmith <[email protected]>
# Copyright (c) 2015-2016 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Aru Sahni <[email protected]>
# Copyright (c) 2015 Steven Myint <[email protected]>
# Copyright (c) 2015 Simu Toni <[email protected]>
# Copyright (c) 2015 Mihai Balint <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2016 Alan Evangelista <[email protected]>
# Copyright (c) 2017-2018 Ville Skyttä <[email protected]>
# Copyright (c) 2017-2018 hippo91 <[email protected]>
# Copyright (c) 2017 Daniel Miller <[email protected]>
# Copyright (c) 2017 Roman Ivanov <[email protected]>
# Copyright (c) 2017 Ned Batchelder <[email protected]>
# Copyright (c) 2018 Randall Leeds <[email protected]>
# Copyright (c) 2018 Mike Frysinger <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Anthony Sottile <[email protected]>
# Copyright (c) 2018 Jason Owen <[email protected]>
# Copyright (c) 2018 Gary Tyler McLeod <[email protected]>
# Copyright (c) 2018 Yuval Langer <[email protected]>
# Copyright (c) 2018 Nick Drozd <[email protected]>
# Copyright (c) 2018 kapsh <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
# pylint: disable=broad-except
""" %prog [options] modules_or_packages
Check that module(s) satisfy a coding standard (and more !).
%prog --help
Display this help message and exit.
%prog --help-msg <msg-id>[,<msg-id>]
Display help messages about given message identifiers and exit.
"""
from __future__ import print_function
import collections
import contextlib
import operator
import os
try:
import multiprocessing
except ImportError:
multiprocessing = None # type: ignore
import sys
import tokenize
import warnings
import astroid
from astroid.__pkginfo__ import version as astroid_version
from astroid import modutils
from pylint import checkers
from pylint import interfaces
from pylint import reporters
from pylint import exceptions
from pylint import utils
from pylint import config
from pylint.__pkginfo__ import version
from pylint.reporters.ureports import nodes as report_nodes
MANAGER = astroid.MANAGER
def _get_new_args(message):
location = (
message.abspath,
message.path,
message.module,
message.obj,
message.line,
message.column,
)
return (message.msg_id, message.symbol, location, message.msg, message.confidence)
def _get_python_path(filepath):
dirname = os.path.realpath(os.path.expanduser(filepath))
if not os.path.isdir(dirname):
dirname = os.path.dirname(dirname)
while True:
if not os.path.exists(os.path.join(dirname, "__init__.py")):
return dirname
old_dirname = dirname
dirname = os.path.dirname(dirname)
if old_dirname == dirname:
return os.getcwd()
return None
def _merge_stats(stats):
merged = {}
by_msg = collections.Counter()
for stat in stats:
message_stats = stat.pop("by_msg", {})
by_msg.update(message_stats)
for key, item in stat.items():
if key not in merged:
merged[key] = item
else:
if isinstance(item, dict):
merged[key].update(item)
else:
merged[key] = merged[key] + item
merged["by_msg"] = by_msg
return merged
@contextlib.contextmanager
def _patch_sysmodules():
# Context manager that permits running pylint, on Windows, with -m switch
# and with --jobs, as in 'python -2 -m pylint .. --jobs'.
# For more details why this is needed,
# see Python issue http://bugs.python.org/issue10845.
mock_main = __name__ != "__main__" # -m switch
if mock_main:
sys.modules["__main__"] = sys.modules[__name__]
try:
yield
finally:
if mock_main:
sys.modules.pop("__main__")
# Python Linter class #########################################################
MSGS = {
"F0001": (
"%s",
"fatal",
"Used when an error occurred preventing the analysis of a \
module (unable to find it for instance).",
),
"F0002": (
"%s: %s",
"astroid-error",
"Used when an unexpected error occurred while building the "
"Astroid representation. This is usually accompanied by a "
"traceback. Please report such errors !",
),
"F0010": (
"error while code parsing: %s",
"parse-error",
"Used when an exception occurred while building the Astroid "
"representation which could be handled by astroid.",
),
"I0001": (
"Unable to run raw checkers on built-in module %s",
"raw-checker-failed",
"Used to inform that a built-in module has not been checked "
"using the raw checkers.",
),
"I0010": (
"Unable to consider inline option %r",
"bad-inline-option",
"Used when an inline option is either badly formatted or can't "
"be used inside modules.",
),
"I0011": (
"Locally disabling %s (%s)",
"locally-disabled",
"Used when an inline option disables a message or a messages category.",
),
"I0013": (
"Ignoring entire file",
"file-ignored",
"Used to inform that the file will not be checked",
),
"I0020": (
"Suppressed %s (from line %d)",
"suppressed-message",
"A message was triggered on a line, but suppressed explicitly "
"by a disable= comment in the file. This message is not "
"generated for messages that are ignored due to configuration "
"settings.",
),
"I0021": (
"Useless suppression of %s",
"useless-suppression",
"Reported when a message is explicitly disabled for a line or "
"a block of code, but never triggered.",
),
"I0022": (
'Pragma "%s" is deprecated, use "%s" instead',
"deprecated-pragma",
"Some inline pylint options have been renamed or reworked, "
"only the most recent form should be used. "
"NOTE:skip-all is only available with pylint >= 0.26",
{"old_names": [("I0014", "deprecated-disable-all")]},
),
"E0001": ("%s", "syntax-error", "Used when a syntax error is raised for a module."),
"E0011": (
"Unrecognized file option %r",
"unrecognized-inline-option",
"Used when an unknown inline option is encountered.",
),
"E0012": (
"Bad option value %r",
"bad-option-value",
"Used when a bad value for an inline option is encountered.",
),
}
def _cpu_count() -> int:
"""Use sched_affinity if available for virtualized or containerized environments."""
sched_getaffinity = getattr(os, "sched_getaffinity", None)
# pylint: disable=not-callable,using-constant-test
if sched_getaffinity:
return len(sched_getaffinity(0))
if multiprocessing:
return multiprocessing.cpu_count()
return 1
if multiprocessing is not None:
class ChildLinter(multiprocessing.Process):
def run(self):
# pylint: disable=no-member, unbalanced-tuple-unpacking
tasks_queue, results_queue, self._config = self._args
self._config["jobs"] = 1 # Child does not parallelize any further.
self._python3_porting_mode = self._config.pop("python3_porting_mode", None)
self._plugins = self._config.pop("plugins", None)
# Run linter for received files/modules.
for file_or_module in iter(tasks_queue.get, "STOP"):
try:
result = self._run_linter(file_or_module[0])
results_queue.put(result)
except Exception as ex:
print(
"internal error with sending report for module %s"
% file_or_module,
file=sys.stderr,
)
print(ex, file=sys.stderr)
results_queue.put({})
def _run_linter(self, file_or_module):
linter = PyLinter()
# Register standard checkers.
linter.load_default_plugins()
# Load command line plugins.
if self._plugins:
linter.load_plugin_modules(self._plugins)
linter.load_configuration_from_config(self._config)
# Load plugin specific configuration
linter.load_plugin_configuration()
linter.set_reporter(reporters.CollectingReporter())
# Enable the Python 3 checker mode. This option is
# passed down from the parent linter up to here, since
# the Python 3 porting flag belongs to the Run class,
# instead of the Linter class.
if self._python3_porting_mode:
linter.python3_porting_mode()
# Run the checks.
linter.check(file_or_module)
msgs = [_get_new_args(m) for m in linter.reporter.messages]
return (
file_or_module,
linter.file_state.base_name,
linter.current_name,
msgs,
linter.stats,
linter.msg_status,
)
# pylint: disable=too-many-instance-attributes
class PyLinter(
config.OptionsManagerMixIn,
utils.MessagesHandlerMixIn,
utils.ReportsHandlerMixIn,
checkers.BaseTokenChecker,
):
"""lint Python modules using external checkers.
This is the main checker controlling the other ones and the reports
generation. It is itself both a raw checker and an astroid checker in order
to:
* handle message activation / deactivation at the module level
* handle some basic but necessary stats'data (number of classes, methods...)
IDE plugin developers: you may have to call
`astroid.builder.MANAGER.astroid_cache.clear()` across runs if you want
to ensure the latest code version is actually checked.
"""
__implements__ = (interfaces.ITokenChecker,)
name = "master"
priority = 0
level = 0
msgs = MSGS
    @staticmethod
    def make_options():
        """Return the option definitions for the linter's "master" section.

        Each entry is an ``(option_name, option_dict)`` pair consumed by the
        configuration machinery; ``dest``/``default``/``type`` follow the
        optparse-style conventions used throughout pylint.
        """
        return (
            (
                "ignore",
                {
                    "type": "csv",
                    "metavar": "<file>[,<file>...]",
                    "dest": "black_list",
                    "default": ("CVS",),
                    "help": "Add files or directories to the blacklist. "
                    "They should be base names, not paths.",
                },
            ),
            (
                "ignore-patterns",
                {
                    "type": "regexp_csv",
                    "metavar": "<pattern>[,<pattern>...]",
                    "dest": "black_list_re",
                    "default": (),
                    "help": "Add files or directories matching the regex patterns to the"
                    " blacklist. The regex matches against base names, not paths.",
                },
            ),
            (
                "persistent",
                {
                    "default": True,
                    "type": "yn",
                    "metavar": "<y_or_n>",
                    "level": 1,
                    "help": "Pickle collected data for later comparisons.",
                },
            ),
            (
                "load-plugins",
                {
                    "type": "csv",
                    "metavar": "<modules>",
                    "default": (),
                    "level": 1,
                    "help": "List of plugins (as comma separated values of "
                    "python modules names) to load, usually to register "
                    "additional checkers.",
                },
            ),
            (
                "output-format",
                {
                    "default": "text",
                    "type": "string",
                    "metavar": "<format>",
                    "short": "f",
                    "group": "Reports",
                    "help": "Set the output format. Available formats are text,"
                    " parseable, colorized, json and msvs (visual studio)."
                    " You can also give a reporter class, e.g. mypackage.mymodule."
                    "MyReporterClass.",
                },
            ),
            (
                "reports",
                {
                    "default": False,
                    "type": "yn",
                    "metavar": "<y_or_n>",
                    "short": "r",
                    "group": "Reports",
                    "help": "Tells whether to display a full report or only the "
                    "messages.",
                },
            ),
            (
                "evaluation",
                {
                    "type": "string",
                    "metavar": "<python_expression>",
                    "group": "Reports",
                    "level": 1,
                    "default": "10.0 - ((float(5 * error + warning + refactor + "
                    "convention) / statement) * 10)",
                    "help": "Python expression which should return a note less "
                    "than 10 (10 is the highest note). You have access "
                    "to the variables errors warning, statement which "
                    "respectively contain the number of errors / "
                    "warnings messages and the total number of "
                    "statements analyzed. This is used by the global "
                    "evaluation report (RP0004).",
                },
            ),
            (
                "score",
                {
                    "default": True,
                    "type": "yn",
                    "metavar": "<y_or_n>",
                    "short": "s",
                    "group": "Reports",
                    "help": "Activate the evaluation score.",
                },
            ),
            (
                "confidence",
                {
                    "type": "multiple_choice",
                    "metavar": "<levels>",
                    "default": "",
                    "choices": [c.name for c in interfaces.CONFIDENCE_LEVELS],
                    "group": "Messages control",
                    "help": "Only show warnings with the listed confidence levels."
                    " Leave empty to show all. Valid levels: %s."
                    % (", ".join(c.name for c in interfaces.CONFIDENCE_LEVELS),),
                },
            ),
            (
                "enable",
                {
                    "type": "csv",
                    "metavar": "<msg ids>",
                    "short": "e",
                    "group": "Messages control",
                    "help": "Enable the message, report, category or checker with the "
                    "given id(s). You can either give multiple identifier "
                    "separated by comma (,) or put this option multiple time "
                    "(only on the command line, not in the configuration file "
                    "where it should appear only once). "
                    'See also the "--disable" option for examples.',
                },
            ),
            (
                "disable",
                {
                    "type": "csv",
                    "metavar": "<msg ids>",
                    "short": "d",
                    "group": "Messages control",
                    "help": "Disable the message, report, category or checker "
                    "with the given id(s). You can either give multiple identifiers "
                    "separated by comma (,) or put this option multiple times "
                    "(only on the command line, not in the configuration file "
                    "where it should appear only once). "
                    'You can also use "--disable=all" to disable everything first '
                    "and then reenable specific checks. For example, if you want "
                    "to run only the similarities checker, you can use "
                    '"--disable=all --enable=similarities". '
                    "If you want to run only the classes checker, but have no "
                    "Warning level messages displayed, use "
                    '"--disable=all --enable=classes --disable=W".',
                },
            ),
            (
                "msg-template",
                {
                    "type": "string",
                    "metavar": "<template>",
                    "group": "Reports",
                    "help": (
                        "Template used to display messages. "
                        "This is a python new-style format string "
                        "used to format the message information. "
                        "See doc for all details."
                    ),
                },
            ),
            (
                "jobs",
                {
                    "type": "int",
                    "metavar": "<n-processes>",
                    "short": "j",
                    "default": 1,
                    "help": "Use multiple processes to speed up Pylint. Specifying 0 will "
                    "auto-detect the number of processors available to use.",
                },
            ),
            (
                "unsafe-load-any-extension",
                {
                    "type": "yn",
                    "metavar": "<yn>",
                    "default": False,
                    "hide": True,
                    "help": (
                        "Allow loading of arbitrary C extensions. Extensions"
                        " are imported into the active Python interpreter and"
                        " may run arbitrary code."
                    ),
                },
            ),
            (
                "limit-inference-results",
                {
                    "type": "int",
                    "metavar": "<number-of-results>",
                    "default": 100,
                    "help": (
                        "Control the amount of potential inferred values when inferring "
                        "a single object. This can help the performance when dealing with "
                        "large functions or complex, nested conditions. "
                    ),
                },
            ),
            (
                "extension-pkg-whitelist",
                {
                    "type": "csv",
                    "metavar": "<pkg[,pkg]>",
                    "default": [],
                    "help": (
                        "A comma-separated list of package or module names"
                        " from where C extensions may be loaded. Extensions are"
                        " loading into the active Python interpreter and may run"
                        " arbitrary code."
                    ),
                },
            ),
            (
                "suggestion-mode",
                {
                    "type": "yn",
                    "metavar": "<yn>",
                    "default": True,
                    "help": (
                        "When enabled, pylint would attempt to guess common "
                        "misconfiguration and emit user-friendly hints instead "
                        "of false-positive error messages."
                    ),
                },
            ),
            (
                "exit-zero",
                {
                    "action": "store_true",
                    "help": (
                        "Always return a 0 (non-error) status code, even if "
                        "lint errors are found. This is primarily useful in "
                        "continuous integration scripts."
                    ),
                },
            ),
        )
option_groups = (
("Messages control", "Options controlling analysis messages"),
("Reports", "Options related to output formatting and reporting"),
)
    def __init__(self, options=(), reporter=None, option_groups=(), pylintrc=None):
        """Create a linter.

        :param tuple options: extra option definitions provided by the caller
            (prepended to the linter's own options).
        :param reporter: reporter instance to use; when None, one is created
            later from the "output-format" option.
        :param tuple option_groups: extra option groups to register.
        :param pylintrc: path of the configuration file to use, if any.
        """
        # some stuff has to be done before ancestors initialization...
        #
        # messages store / checkers / reporter / astroid manager
        self.msgs_store = utils.MessagesStore()
        self.reporter = None
        self._reporter_name = None
        self._reporters = {}
        self._checkers = collections.defaultdict(list)
        self._pragma_lineno = {}
        self._ignore_file = False
        # visit variables
        self.file_state = utils.FileState()
        self.current_name = None
        self.current_file = None
        self.stats = None
        # init options
        self._external_opts = options
        self.options = options + PyLinter.make_options()
        self.option_groups = option_groups + PyLinter.option_groups
        self._options_methods = {"enable": self.enable, "disable": self.disable}
        # deprecated spellings of the enable/disable options
        self._bw_options_methods = {
            "disable-msg": self.disable,
            "enable-msg": self.enable,
        }
        full_version = "%%prog %s\nastroid %s\nPython %s" % (
            version,
            astroid_version,
            sys.version,
        )
        # the mixins are initialized explicitly; super() reaches the
        # OptionsManagerMixIn which needs the options set up above
        utils.MessagesHandlerMixIn.__init__(self)
        utils.ReportsHandlerMixIn.__init__(self)
        super(PyLinter, self).__init__(
            usage=__doc__, version=full_version, config_file=pylintrc or config.PYLINTRC
        )
        checkers.BaseTokenChecker.__init__(self)
        # provided reports
        self.reports = (
            ("RP0001", "Messages by category", report_total_messages_stats),
            (
                "RP0002",
                "% errors / warnings by module",
                report_messages_by_module_stats,
            ),
            ("RP0003", "Messages", report_messages_stats),
        )
        # the linter is itself a checker (it handles inline pragmas)
        self.register_checker(self)
        self._dynamic_plugins = set()
        self._python3_porting_mode = False
        self._error_mode = False
        self.load_provider_defaults()
        if reporter:
            self.set_reporter(reporter)
def load_default_plugins(self):
checkers.initialize(self)
reporters.initialize(self)
# Make sure to load the default reporter, because
# the option has been set before the plugins had been loaded.
if not self.reporter:
self._load_reporter()
def load_plugin_modules(self, modnames):
"""take a list of module names which are pylint plugins and load
and register them
"""
for modname in modnames:
if modname in self._dynamic_plugins:
continue
self._dynamic_plugins.add(modname)
module = modutils.load_module_from_name(modname)
module.register(self)
def load_plugin_configuration(self):
"""Call the configuration hook for plugins
This walks through the list of plugins, grabs the "load_configuration"
hook, if exposed, and calls it to allow plugins to configure specific
settings.
"""
for modname in self._dynamic_plugins:
module = modutils.load_module_from_name(modname)
if hasattr(module, "load_configuration"):
module.load_configuration(self)
    def _load_reporter(self):
        """Instantiate and install the reporter named by ``self._reporter_name``.

        :raises exceptions.InvalidReporterError: if the name is neither a
            registered reporter nor an importable dotted class path.
        """
        name = self._reporter_name.lower()
        if name in self._reporters:
            # A registered reporter: instantiate it directly.
            self.set_reporter(self._reporters[name]())
        else:
            # Otherwise treat the name as a dotted path to a reporter class.
            try:
                reporter_class = self._load_reporter_class()
            except (ImportError, AttributeError):
                raise exceptions.InvalidReporterError(name)
            else:
                self.set_reporter(reporter_class())
def _load_reporter_class(self):
qname = self._reporter_name
module = modutils.load_module_from_name(modutils.get_module_part(qname))
class_name = qname.split(".")[-1]
reporter_class = getattr(module, class_name)
return reporter_class
    def set_reporter(self, reporter):
        """set the reporter used to display messages and reports"""
        self.reporter = reporter
        # Give the reporter a back-reference so it can query linter state.
        reporter.linter = self
    def set_option(self, optname, value, action=None, optdict=None):
        """overridden from config.OptionsProviderMixin to handle some
        special options

        "enable"/"disable" (and their deprecated "-msg" spellings) are routed
        to the enable/disable methods; "output-format" records the reporter
        name; everything else falls through to the base implementation.
        """
        if optname in self._options_methods or optname in self._bw_options_methods:
            if value:
                try:
                    meth = self._options_methods[optname]
                except KeyError:
                    # deprecated "enable-msg"/"disable-msg" alias: warn once
                    meth = self._bw_options_methods[optname]
                    warnings.warn(
                        "%s is deprecated, replace it by %s"
                        % (optname, optname.split("-")[0]),
                        DeprecationWarning,
                    )
                value = utils._check_csv(value)
                if isinstance(value, (list, tuple)):
                    for _id in value:
                        meth(_id, ignore_unknown=True)
                else:
                    meth(value)
            return  # no need to call set_option, disable/enable methods do it
        elif optname == "output-format":
            self._reporter_name = value
            # If the reporters are already available, load
            # the reporter class.
            if self._reporters:
                self._load_reporter()
        try:
            checkers.BaseTokenChecker.set_option(self, optname, value, action, optdict)
        except config.UnsupportedAction:
            print("option %s can't be read from config file" % optname, file=sys.stderr)
    def register_reporter(self, reporter_class):
        """Make ``reporter_class`` available under its ``name`` attribute."""
        self._reporters[reporter_class.name] = reporter_class
def report_order(self):
reports = sorted(self._reports, key=lambda x: getattr(x, "name", ""))
try:
# Remove the current reporter and add it
# at the end of the list.
reports.pop(reports.index(self))
except ValueError:
pass
else:
reports.append(self)
return reports
# checkers manipulation methods ############################################
    def register_checker(self, checker):
        """register a new checker
        checker is an object implementing IRawChecker or / and IAstroidChecker

        Registers the checker's reports, options and messages; checkers with
        a falsy ``enabled`` attribute are registered but start disabled.
        """
        # priorities are non-positive; prepare_checkers sorts descending so
        # values closer to 0 run first
        assert checker.priority <= 0, "checker priority can't be >= 0"
        self._checkers[checker.name].append(checker)
        for r_id, r_title, r_cb in checker.reports:
            self.register_report(r_id, r_title, r_cb, checker)
        self.register_options_provider(checker)
        if hasattr(checker, "msgs"):
            self.msgs_store.register_messages_from_checker(checker)
        checker.load_defaults()
        # Register the checker, but disable all of its messages.
        # TODO(cpopa): we should have a better API for this.
        if not getattr(checker, "enabled", True):
            self.disable(checker.name)
def disable_noerror_messages(self):
for msgcat, msgids in self.msgs_store._msgs_by_category.items():
# enable only messages with 'error' severity and above ('fatal')
if msgcat in ["E", "F"]:
for msgid in msgids:
self.enable(msgid)
else:
for msgid in msgids:
self.disable(msgid)
def disable_reporters(self):
"""disable all reporters"""
for _reporters in self._reports.values():
for report_id, _, _ in _reporters:
self.disable_report(report_id)
    def error_mode(self):
        """error mode: enable only errors; no reports, no persistent"""
        self._error_mode = True
        self.disable_noerror_messages()
        self.disable("miscellaneous")
        if self._python3_porting_mode:
            # py3k mode: keep only the porting checker's error messages, then
            # re-apply any explicit "disable" settings from the config file.
            self.disable("all")
            for msg_id in self._checker_messages("python3"):
                if msg_id.startswith("E"):
                    self.enable(msg_id)
            config_parser = self.cfgfile_parser
            if config_parser.has_option("MESSAGES CONTROL", "disable"):
                value = config_parser.get("MESSAGES CONTROL", "disable")
                self.global_set_option("disable", value)
        else:
            self.disable("python3")
        self.set_option("reports", False)
        self.set_option("persistent", False)
        self.set_option("score", False)
    def python3_porting_mode(self):
        """Disable all other checkers and enable Python 3 warnings."""
        self.disable("all")
        self.enable("python3")
        if self._error_mode:
            # The error mode was activated, using the -E flag.
            # So we'll need to enable only the errors from the
            # Python 3 porting checker.
            for msg_id in self._checker_messages("python3"):
                if msg_id.startswith("E"):
                    self.enable(msg_id)
                else:
                    self.disable(msg_id)
        config_parser = self.cfgfile_parser
        if config_parser.has_option("MESSAGES CONTROL", "disable"):
            # Explicit "disable" settings from the config file still apply on
            # top of the porting-mode defaults.
            value = config_parser.get("MESSAGES CONTROL", "disable")
            self.global_set_option("disable", value)
        self._python3_porting_mode = True
# block level option handling #############################################
#
# see func_block_disable_msg.py test case for expected behaviour
    def process_tokens(self, tokens):
        """process tokens from the current module to search for module/block
        level options

        Scans comment tokens for inline pragmas ("# pylint: ...") and applies
        them: enable/disable directives, file-level skips, and their
        deprecated spellings (which additionally emit "deprecated-pragma").
        """
        control_pragmas = {"disable", "enable"}
        for (tok_type, content, start, _, _) in tokens:
            if tok_type != tokenize.COMMENT:
                continue
            match = utils.OPTION_RGX.search(content)
            if match is None:
                continue
            first_group = match.group(1)
            # "disable-all"/"skip-file" make the whole module ignored.
            if (
                first_group.strip() == "disable-all"
                or first_group.strip() == "skip-file"
            ):
                if first_group.strip() == "disable-all":
                    self.add_message(
                        "deprecated-pragma",
                        line=start[0],
                        args=("disable-all", "skip-file"),
                    )
                self.add_message("file-ignored", line=start[0])
                self._ignore_file = True
                return
            try:
                opt, value = first_group.split("=", 1)
            except ValueError:
                self.add_message(
                    "bad-inline-option", args=first_group.strip(), line=start[0]
                )
                continue
            opt = opt.strip()
            if opt in self._options_methods or opt in self._bw_options_methods:
                try:
                    meth = self._options_methods[opt]
                except KeyError:
                    meth = self._bw_options_methods[opt]
                    # found a "(dis|en)able-msg" pragma deprecated suppression
                    self.add_message(
                        "deprecated-pragma",
                        line=start[0],
                        args=(opt, opt.replace("-msg", "")),
                    )
                for msgid in utils._splitstrip(value):
                    # Add the line where a control pragma was encountered.
                    if opt in control_pragmas:
                        self._pragma_lineno[msgid] = start[0]
                    try:
                        # "disable=all" is a deprecated spelling of skip-file
                        if (opt, msgid) == ("disable", "all"):
                            self.add_message(
                                "deprecated-pragma",
                                line=start[0],
                                args=("disable=all", "skip-file"),
                            )
                            self.add_message("file-ignored", line=start[0])
                            self._ignore_file = True
                            return
                        meth(msgid, "module", start[0])
                    except exceptions.UnknownMessageError:
                        self.add_message("bad-option-value", args=msgid, line=start[0])
            else:
                self.add_message("unrecognized-inline-option", args=opt, line=start[0])
# code checking methods ###################################################
def get_checkers(self):
"""return all available checkers as a list"""
return [self] + [
c
for _checkers in self._checkers.values()
for c in _checkers
if c is not self
]
def prepare_checkers(self):
"""return checkers needed for activated messages and reports"""
if not self.config.reports:
self.disable_reporters()
# get needed checkers
neededcheckers = [self]
for checker in self.get_checkers()[1:]:
messages = {msg for msg in checker.msgs if self.is_message_enabled(msg)}
if messages or any(self.report_is_enabled(r[0]) for r in checker.reports):
neededcheckers.append(checker)
# Sort checkers by priority
neededcheckers = sorted(
neededcheckers, key=operator.attrgetter("priority"), reverse=True
)
return neededcheckers
# pylint: disable=unused-argument
@staticmethod
def should_analyze_file(modname, path, is_argument=False):
"""Returns whether or not a module should be checked.
This implementation returns True for all python source file, indicating
that all files should be linted.
Subclasses may override this method to indicate that modules satisfying
certain conditions should not be linted.
:param str modname: The name of the module to be checked.
:param str path: The full path to the source code of the module.
:param bool is_argument: Whetter the file is an argument to pylint or not.
Files which respect this property are always
checked, since the user requested it explicitly.
:returns: True if the module should be checked.
:rtype: bool
"""
if is_argument:
return True
return path.endswith(".py")
# pylint: enable=unused-argument
    def check(self, files_or_modules):
        """main checking entry: check a list of files or modules from their
        name.

        A single name is accepted as well as a list/tuple of names.  Runs
        in-process when jobs == 1, otherwise dispatches to child linters.
        """
        # initialize msgs_state now that all messages have been registered into
        # the store
        for msg in self.msgs_store.messages:
            if not msg.may_be_emitted():
                self._msgs_state[msg.msgid] = False
        if not isinstance(files_or_modules, (list, tuple)):
            files_or_modules = (files_or_modules,)
        if self.config.jobs == 1:
            self._do_check(files_or_modules)
        else:
            # NOTE(review): _patch_sysmodules presumably shields sys.modules
            # while child processes are spawned -- confirm at its definition.
            with _patch_sysmodules():
                self._parallel_check(files_or_modules)
def _get_jobs_config(self):
child_config = collections.OrderedDict()
filter_options = {"long-help"}
filter_options.update((opt_name for opt_name, _ in self._external_opts))
for opt_providers in self._all_options.values():
for optname, optdict, val in opt_providers.options_and_values():
if optdict.get("deprecated"):
continue
if optname not in filter_options:
child_config[optname] = utils._format_option_value(optdict, val)
child_config["python3_porting_mode"] = self._python3_porting_mode
child_config["plugins"] = self._dynamic_plugins
return child_config
    def _parallel_task(self, files_or_modules):
        """Spawn child linters, feed them files and yield their results.

        Generator: yields one result tuple per analyzed file.  On a queue
        error the remaining work is abandoned and the process exits with
        status 32 after the children have been joined.
        """
        # Prepare configuration for child linters.
        child_config = self._get_jobs_config()
        children = []
        manager = multiprocessing.Manager()
        tasks_queue = manager.Queue()
        results_queue = manager.Queue()
        # Send files to child linters.
        expanded_files = []
        for descr in self.expand_files(files_or_modules):
            modname, filepath, is_arg = descr["name"], descr["path"], descr["isarg"]
            if self.should_analyze_file(modname, filepath, is_argument=is_arg):
                expanded_files.append(descr)
        # do not start more jobs than needed
        for _ in range(min(self.config.jobs, len(expanded_files))):
            child_linter = ChildLinter(args=(tasks_queue, results_queue, child_config))
            child_linter.start()
            children.append(child_linter)
        for files_or_module in expanded_files:
            path = files_or_module["path"]
            tasks_queue.put([path])
        # collect results from child linters
        failed = False
        for _ in expanded_files:
            try:
                result = results_queue.get()
            except Exception as ex:
                print(
                    "internal error while receiving results from child linter",
                    file=sys.stderr,
                )
                print(ex, file=sys.stderr)
                failed = True
                break
            yield result
        # Stop child linters and wait for their completion.
        for _ in range(self.config.jobs):
            tasks_queue.put("STOP")
        for child in children:
            child.join()
        if failed:
            print("Error occurred, stopping the linter.", file=sys.stderr)
            sys.exit(32)
    def _parallel_check(self, files_or_modules):
        """Check the given files with child linters and merge their results."""
        # Reset stats.
        self.open()
        all_stats = []
        module = None
        for result in self._parallel_task(files_or_modules):
            if not result:
                continue
            (_, self.file_state.base_name, module, messages, stats, msg_status) = result
            # Replay each child's messages through our own reporter.
            for msg in messages:
                msg = utils.Message(*msg)
                self.set_current_module(module)
                self.reporter.handle_message(msg)
            all_stats.append(stats)
            # Accumulate exit-status bits from every child.
            self.msg_status |= msg_status
        self.stats = _merge_stats(all_stats)
        self.current_name = module
        # Insert stats data to local checkers.
        for checker in self.get_checkers():
            if checker is not self:
                checker.stats = self.stats
    def _do_check(self, files_or_modules):
        """Check the given files in-process (the jobs == 1 path)."""
        walker = utils.PyLintASTWalker(self)
        _checkers = self.prepare_checkers()
        # Partition checkers by the interface they implement; self handles
        # tokens too but only for inline pragmas (see check_astroid_module).
        tokencheckers = [
            c
            for c in _checkers
            if interfaces.implements(c, interfaces.ITokenChecker) and c is not self
        ]
        rawcheckers = [
            c for c in _checkers if interfaces.implements(c, interfaces.IRawChecker)
        ]
        # notify global begin
        for checker in _checkers:
            checker.open()
            if interfaces.implements(checker, interfaces.IAstroidChecker):
                walker.add_checker(checker)
        # build ast and check modules or packages
        for descr in self.expand_files(files_or_modules):
            modname, filepath, is_arg = descr["name"], descr["path"], descr["isarg"]
            if not self.should_analyze_file(modname, filepath, is_argument=is_arg):
                continue
            self.set_current_module(modname, filepath)
            # get the module representation
            ast_node = self.get_ast(filepath, modname)
            if ast_node is None:
                continue
            # XXX to be correct we need to keep module_msgs_state for every
            # analyzed module (the problem stands with localized messages which
            # are only detected in the .close step)
            self.file_state = utils.FileState(descr["basename"])
            self._ignore_file = False
            # fix the current file (if the source file was not available or
            # if it's actually a c extension)
            self.current_file = ast_node.file  # pylint: disable=maybe-no-member
            self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
            # warn about spurious inline messages handling
            spurious_messages = self.file_state.iter_spurious_suppression_messages(
                self.msgs_store
            )
            for msgid, line, args in spurious_messages:
                self.add_message(msgid, line, None, args)
        # notify global end
        self.stats["statement"] = walker.nbstatements
        for checker in reversed(_checkers):
            checker.close()
    def expand_files(self, modules):
        """get modules and errors from a list of modules and handle errors

        Returns the expanded module descriptions; expansion errors are turned
        into messages on this linter.
        """
        result, errors = utils.expand_modules(
            modules, self.config.black_list, self.config.black_list_re
        )
        for error in errors:
            # The displayed message defaults to the module name; "fatal"
            # errors show the exception text instead, with the working
            # directory stripped to keep paths short.
            message = modname = error["mod"]
            key = error["key"]
            self.set_current_module(modname)
            if key == "fatal":
                message = str(error["ex"]).replace(os.getcwd() + os.sep, "")
            self.add_message(key, args=message)
        return result
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats["by_module"][modname] = {}
self.stats["by_module"][modname]["statement"] = 0
for msg_cat in utils.MSG_TYPES.values():
self.stats["by_module"][modname][msg_cat] = 0
    def get_ast(self, filepath, modname):
        """return an ast(roid) representation for a module

        Returns None when the module could not be parsed; the failure is
        reported as a syntax-error, parse-error or astroid-error message.
        """
        try:
            return MANAGER.ast_from_file(filepath, modname, source=True)
        except astroid.AstroidSyntaxError as ex:
            # pylint: disable=no-member
            self.add_message(
                "syntax-error", line=getattr(ex.error, "lineno", 0), args=str(ex.error)
            )
        except astroid.AstroidBuildingException as ex:
            self.add_message("parse-error", args=ex)
        except Exception as ex:
            # Unexpected failure inside astroid itself: print the traceback
            # and emit a generic astroid-error message.
            import traceback
            traceback.print_exc()
            self.add_message("astroid-error", args=(ex.__class__, ex))
    def check_astroid_module(self, ast_node, walker, rawcheckers, tokencheckers):
        """Check a module from its astroid representation.

        Returns True when the module was checked, False when an inline
        pragma ignored the whole file, and None when tokenization failed.
        """
        try:
            tokens = utils.tokenize_module(ast_node)
        except tokenize.TokenError as ex:
            self.add_message("syntax-error", line=ex.args[1][0], args=ex.args[0])
            return None
        if not ast_node.pure_python:
            self.add_message("raw-checker-failed", args=ast_node.name)
        else:
            # assert astroid.file.endswith('.py')
            # invoke ITokenChecker interface on self to fetch module/block
            # level options
            self.process_tokens(tokens)
            if self._ignore_file:
                return False
            # walk ast to collect line numbers
            self.file_state.collect_block_lines(self.msgs_store, ast_node)
            # run raw and tokens checkers
            for checker in rawcheckers:
                checker.process_module(ast_node)
            for checker in tokencheckers:
                checker.process_tokens(tokens)
        # generate events to astroid checkers
        walker.walk(ast_node)
        return True
# IAstroidChecker interface #################################################
def open(self):
"""initialize counters"""
self.stats = {"by_module": {}, "by_msg": {}}
MANAGER.always_load_extensions = self.config.unsafe_load_any_extension
MANAGER.max_inferable_values = self.config.limit_inference_results
MANAGER.extension_package_whitelist.update(self.config.extension_pkg_whitelist)
for msg_cat in utils.MSG_TYPES.values():
self.stats[msg_cat] = 0
    def generate_reports(self):
        """close the whole package /module, it's time to make reports !
        if persistent run, pickle results for later comparison

        When no module could be analyzed (base_name is None) only the
        reporter's on_close hook runs, with empty previous stats.
        """
        # Display whatever messages are left on the reporter.
        self.reporter.display_messages(report_nodes.Section())
        if self.file_state.base_name is not None:
            # load previous results if any
            previous_stats = config.load_results(self.file_state.base_name)
            # XXX code below needs refactoring to be more reporter agnostic
            self.reporter.on_close(self.stats, previous_stats)
            if self.config.reports:
                sect = self.make_reports(self.stats, previous_stats)
            else:
                sect = report_nodes.Section()
            if self.config.reports:
                self.reporter.display_reports(sect)
            self._report_evaluation()
            # save results if persistent run
            if self.config.persistent:
                config.save_results(self.stats, self.file_state.base_name)
        else:
            self.reporter.on_close(self.stats, {})
    def _report_evaluation(self):
        """make the global evaluation report"""
        # check with at least check 1 statements (usually 0 when there is a
        # syntax error preventing pylint from further processing)
        previous_stats = config.load_results(self.file_state.base_name)
        if self.stats["statement"] == 0:
            return
        # get a global note for the code by evaluating the configured
        # "evaluation" expression against the collected stats
        evaluation = self.config.evaluation
        try:
            note = eval(evaluation, {}, self.stats)  # pylint: disable=eval-used
        except Exception as ex:
            msg = "An exception occurred while rating: %s" % ex
        else:
            self.stats["global_note"] = note
            msg = "Your code has been rated at %.2f/10" % note
            # Append the delta against the previous run, when available.
            pnote = previous_stats.get("global_note")
            if pnote is not None:
                msg += " (previous run: %.2f/10, %+.2f)" % (pnote, note - pnote)
        if self.config.score:
            sect = report_nodes.EvaluationSection(msg)
            self.reporter.display_reports(sect)
# some reporting functions ####################################################
def report_total_messages_stats(sect, stats, previous_stats):
    """Build the per-category totals table (current run vs previous run)."""
    rows = ["type", "number", "previous", "difference"]
    rows += checkers.table_lines_from_stats(
        stats, previous_stats, ("convention", "refactor", "warning", "error")
    )
    sect.append(report_nodes.Table(children=rows, cols=4, rheaders=1))
def report_messages_stats(sect, stats, _):
    """Build the table of message occurrences, most frequent first.

    Informational ('I') messages are excluded from the table.
    """
    if not stats["by_msg"]:
        # No message was emitted: skip this report entirely.
        raise exceptions.EmptyReportError()
    counted = [
        (value, msg_id)
        for msg_id, value in stats["by_msg"].items()
        if not msg_id.startswith("I")
    ]
    counted.sort(reverse=True)
    lines = ("message id", "occurrences")
    for value, msg_id in counted:
        lines += (msg_id, str(value))
    sect.append(report_nodes.Table(children=lines, cols=2, rheaders=1))
def report_messages_by_module_stats(sect, stats, _):
    """make errors / warnings by modules report"""
    if len(stats["by_module"]) == 1:
        # don't print this report when we are analysing a single module
        raise exceptions.EmptyReportError()
    by_mod = collections.defaultdict(dict)
    # Express each module's counters as a percentage of the global total
    # for that message type.
    for m_type in ("fatal", "error", "warning", "refactor", "convention"):
        total = stats[m_type]
        for module in stats["by_module"].keys():
            mod_total = stats["by_module"][module][m_type]
            if total == 0:
                percent = 0
            else:
                percent = float((mod_total) * 100) / total
            by_mod[module][m_type] = percent
    # Sort modules by (error, warning, refactor, convention) descending.
    sorted_result = []
    for module, mod_info in by_mod.items():
        sorted_result.append(
            (
                mod_info["error"],
                mod_info["warning"],
                mod_info["refactor"],
                mod_info["convention"],
                module,
            )
        )
    sorted_result.sort()
    sorted_result.reverse()
    lines = ["module", "error", "warning", "refactor", "convention"]
    for line in sorted_result:
        # Don't report clean modules.
        if all(entry == 0 for entry in line[:-1]):
            continue
        lines.append(line[-1])
        for val in line[:-1]:
            lines.append("%.2f" % val)
    if len(lines) == 5:
        # Only the header row remained: nothing to report.
        raise exceptions.EmptyReportError()
    sect.append(report_nodes.Table(children=lines, cols=5, rheaders=1))
# utilities ###################################################################
class ArgumentPreprocessingError(Exception):
    """Raised if an error occurs during argument preprocessing.

    See ``preprocess_options`` for the options handled before the regular
    command line parsing.
    """
def preprocess_options(args, search_for):
    """Process a few options ahead of the regular option parsing.

    ``search_for`` maps an option name (without the leading ``--``) to a
    ``(callback, takearg)`` pair.  Whenever ``--option`` or ``--option=value``
    is found in ``args``, it is removed from the list (together with its
    value when ``takearg`` is true) and ``callback(option, value)`` is
    invoked.  All other arguments are left in place.

    :param list args: command line arguments, modified in place.
    :param dict search_for: mapping of option name to (callback, takearg).
    :raises ArgumentPreprocessingError: if an option expecting a value has
        none, or if a flag option is given a value.
    """
    i = 0
    while i < len(args):
        arg = args[i]
        if not arg.startswith("--"):
            i += 1
            continue
        # Split "--option=value" into its parts; bare "--option" has no value.
        try:
            option, val = arg[2:].split("=", 1)
        except ValueError:
            option, val = arg[2:], None
        try:
            cb, takearg = search_for[option]
        except KeyError:
            # Not one of ours: leave it for the regular parser.
            i += 1
            continue
        # Consume the option (and possibly its value) from the argument list;
        # note that i is NOT advanced, so the next argument is examined next.
        del args[i]
        if takearg and val is None:
            if i >= len(args) or args[i].startswith("-"):
                msg = "Option %s expects a value" % option
                raise ArgumentPreprocessingError(msg)
            val = args[i]
            del args[i]
        elif not takearg and val is not None:
            # Fixed grammar of the user-facing message ("doesn't expects").
            msg = "Option %s doesn't expect a value" % option
            raise ArgumentPreprocessingError(msg)
        cb(option, val)
@contextlib.contextmanager
def fix_import_path(args):
    """Prepare sys.path for running the linter checks.

    Within this context, each of the given arguments is importable.
    Paths are added to sys.path in corresponding order to the arguments.
    We avoid adding duplicate directories to sys.path.
    `sys.path` is reset to its original value upon exiting this context.
    """
    orig = list(sys.path)
    changes = []
    for arg in args:
        path = _get_python_path(arg)
        if path in changes:
            continue
        else:
            changes.append(path)
    sys.path[:] = changes + ["."] + sys.path
    try:
        yield
    finally:
        # Always restore the original path, even if the checks raised.
        sys.path[:] = orig
class Run:
"""helper class to use as main for pylint :
run(*sys.argv[1:])
"""
LinterClass = PyLinter
option_groups = (
(
"Commands",
"Options which are actually commands. Options in this \
group are mutually exclusive.",
),
)
    def __init__(self, args, reporter=None, do_exit=True):
        """Parse options, configure a linter, run it on ``args``.

        :param list args: command line arguments.
        :param reporter: optional reporter instance overriding the
            configured output format.
        :param bool do_exit: when true (the default), exit the process with
            the linter's message status, or 0 when --exit-zero was given.
        """
        self._rcfile = None
        self._plugins = []
        self.verbose = None
        # A few options must be handled before regular option parsing: the
        # rcfile/plugins influence which options even exist.
        try:
            preprocess_options(
                args,
                {
                    # option: (callback, takearg)
                    "init-hook": (cb_init_hook, True),
                    "rcfile": (self.cb_set_rcfile, True),
                    "load-plugins": (self.cb_add_plugins, True),
                    "verbose": (self.cb_verbose_mode, False),
                },
            )
        except ArgumentPreprocessingError as ex:
            print(ex, file=sys.stderr)
            sys.exit(32)
        self.linter = linter = self.LinterClass(
            (
                (
                    "rcfile",
                    {
                        "action": "callback",
                        "callback": lambda *args: 1,
                        "type": "string",
                        "metavar": "<file>",
                        "help": "Specify a configuration file.",
                    },
                ),
                (
                    "init-hook",
                    {
                        "action": "callback",
                        "callback": lambda *args: 1,
                        "type": "string",
                        "metavar": "<code>",
                        "level": 1,
                        "help": "Python code to execute, usually for sys.path "
                        "manipulation such as pygtk.require().",
                    },
                ),
                (
                    "help-msg",
                    {
                        "action": "callback",
                        "type": "string",
                        "metavar": "<msg-id>",
                        "callback": self.cb_help_message,
                        "group": "Commands",
                        "help": "Display a help message for the given message id and "
                        "exit. The value may be a comma separated list of message ids.",
                    },
                ),
                (
                    "list-msgs",
                    {
                        "action": "callback",
                        "metavar": "<msg-id>",
                        "callback": self.cb_list_messages,
                        "group": "Commands",
                        "level": 1,
                        "help": "Generate pylint's messages.",
                    },
                ),
                (
                    "list-conf-levels",
                    {
                        "action": "callback",
                        "callback": cb_list_confidence_levels,
                        "group": "Commands",
                        "level": 1,
                        "help": "Generate pylint's confidence levels.",
                    },
                ),
                (
                    "full-documentation",
                    {
                        "action": "callback",
                        "metavar": "<msg-id>",
                        "callback": self.cb_full_documentation,
                        "group": "Commands",
                        "level": 1,
                        "help": "Generate pylint's full documentation.",
                    },
                ),
                (
                    "generate-rcfile",
                    {
                        "action": "callback",
                        "callback": self.cb_generate_config,
                        "group": "Commands",
                        "help": "Generate a sample configuration file according to "
                        "the current configuration. You can put other options "
                        "before this one to get them in the generated "
                        "configuration.",
                    },
                ),
                (
                    "generate-man",
                    {
                        "action": "callback",
                        "callback": self.cb_generate_manpage,
                        "group": "Commands",
                        "help": "Generate pylint's man page.",
                        "hide": True,
                    },
                ),
                (
                    "errors-only",
                    {
                        "action": "callback",
                        "callback": self.cb_error_mode,
                        "short": "E",
                        "help": "In error mode, checkers without error messages are "
                        "disabled and for others, only the ERROR messages are "
                        "displayed, and no reports are done by default.",
                    },
                ),
                (
                    "py3k",
                    {
                        "action": "callback",
                        "callback": self.cb_python3_porting_mode,
                        "help": "In Python 3 porting mode, all checkers will be "
                        "disabled and only messages emitted by the porting "
                        "checker will be displayed.",
                    },
                ),
                (
                    "verbose",
                    {
                        "action": "callback",
                        "callback": self.cb_verbose_mode,
                        "short": "v",
                        "help": "In verbose mode, extra non-checker-related info "
                        "will be displayed.",
                    },
                ),
            ),
            option_groups=self.option_groups,
            pylintrc=self._rcfile,
        )
        # register standard checkers
        linter.load_default_plugins()
        # load command line plugins
        linter.load_plugin_modules(self._plugins)
        # add some help section
        linter.add_help_section("Environment variables", config.ENV_HELP, level=1)
        # pylint: disable=bad-continuation
        linter.add_help_section(
            "Output",
            "Using the default text output, the message format is : \n"
            " \n"
            " MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE \n"
            " \n"
            "There are 5 kind of message types : \n"
            " * (C) convention, for programming standard violation \n"
            " * (R) refactor, for bad code smell \n"
            " * (W) warning, for python specific problems \n"
            " * (E) error, for probable bugs in the code \n"
            " * (F) fatal, if an error occurred which prevented pylint from doing further\n"
            "processing.\n",
            level=1,
        )
        linter.add_help_section(
            "Output status code",
            "Pylint should leave with following status code: \n"
            " * 0 if everything went fine \n"
            " * 1 if a fatal message was issued \n"
            " * 2 if an error message was issued \n"
            " * 4 if a warning message was issued \n"
            " * 8 if a refactor message was issued \n"
            " * 16 if a convention message was issued \n"
            " * 32 on usage error \n"
            " \n"
            "status 1 to 16 will be bit-ORed so you can know which different categories has\n"
            "been issued by analysing pylint output status code\n",
            level=1,
        )
        # read configuration
        linter.disable("I")
        linter.enable("c-extension-no-member")
        linter.read_config_file(verbose=self.verbose)
        config_parser = linter.cfgfile_parser
        # run init hook, if present, before loading plugins
        if config_parser.has_option("MASTER", "init-hook"):
            cb_init_hook(
                "init-hook", utils._unquote(config_parser.get("MASTER", "init-hook"))
            )
        # is there some additional plugins in the file configuration?
        if config_parser.has_option("MASTER", "load-plugins"):
            plugins = utils._splitstrip(config_parser.get("MASTER", "load-plugins"))
            linter.load_plugin_modules(plugins)
        # now we can load file config and command line, plugins (which can
        # provide options) have been registered
        linter.load_config_file()
        if reporter:
            # if a custom reporter is provided as argument, it may be overridden
            # by file parameters, so re-set it here, but before command line
            # parsing so it's still overrideable by command line option
            linter.set_reporter(reporter)
        try:
            args = linter.load_command_line_configuration(args)
        except SystemExit as exc:
            if exc.code == 2:  # bad options
                exc.code = 32
            raise
        if not args:
            print(linter.help())
            sys.exit(32)
        if linter.config.jobs < 0:
            print(
                "Jobs number (%d) should be greater than or equal to 0"
                % linter.config.jobs,
                file=sys.stderr,
            )
            sys.exit(32)
        if linter.config.jobs > 1 or linter.config.jobs == 0:
            if multiprocessing is None:
                print(
                    "Multiprocessing library is missing, " "fallback to single process",
                    file=sys.stderr,
                )
                linter.set_option("jobs", 1)
            else:
                if linter.config.jobs == 0:
                    linter.config.jobs = _cpu_count()
        # We have loaded configuration from config file and command line. Now, we can
        # load plugin specific configuration.
        linter.load_plugin_configuration()
        # insert current working directory to the python path to have a correct
        # behaviour
        with fix_import_path(args):
            linter.check(args)
            linter.generate_reports()
        if do_exit:
            if linter.config.exit_zero:
                sys.exit(0)
            else:
                sys.exit(self.linter.msg_status)
def cb_set_rcfile(self, name, value):
"""callback for option preprocessing (i.e. before option parsing)"""
self._rcfile = value
def cb_add_plugins(self, name, value):
"""callback for option preprocessing (i.e. before option parsing)"""
self._plugins.extend(utils._splitstrip(value))
def cb_error_mode(self, *args, **kwargs):
"""error mode:
* disable all but error messages
* disable the 'miscellaneous' checker which can be safely deactivated in
debug
* disable reports
* do not save execution information
"""
self.linter.error_mode()
def cb_generate_config(self, *args, **kwargs):
"""optik callback for sample config file generation"""
self.linter.generate_config(skipsections=("COMMANDS",))
sys.exit(0)
def cb_generate_manpage(self, *args, **kwargs):
"""optik callback for sample config file generation"""
from pylint import __pkginfo__
self.linter.generate_manpage(__pkginfo__)
sys.exit(0)
def cb_help_message(self, option, optname, value, parser):
"""optik callback for printing some help about a particular message"""
self.linter.msgs_store.help_message(utils._splitstrip(value))
sys.exit(0)
def cb_full_documentation(self, option, optname, value, parser):
"""optik callback for printing full documentation"""
self.linter.print_full_documentation()
sys.exit(0)
def cb_list_messages(self, option, optname, value, parser): # FIXME
"""optik callback for printing available messages"""
self.linter.msgs_store.list_messages()
sys.exit(0)
def cb_python3_porting_mode(self, *args, **kwargs):
"""Activate only the python3 porting checker."""
self.linter.python3_porting_mode()
def cb_verbose_mode(self, *args, **kwargs):
self.verbose = True
def cb_list_confidence_levels(option, optname, value, parser):
for level in interfaces.CONFIDENCE_LEVELS:
print("%-18s: %s" % level)
sys.exit(0)
def cb_init_hook(optname, value):
"""exec arbitrary code to set sys.path for instance"""
exec(value) # pylint: disable=exec-used
if __name__ == "__main__":
Run(sys.argv[1:])
| 1 | 10,882 | Why do we do this check here? `sys.version_info` cannot be less than 3.3 as pylint does not support older versions. I think we should avoid popping `__main__` altogether. | PyCQA-pylint | py |
@@ -199,6 +199,7 @@ def serv_cmd(codechecker_cfg, test_config):
'--port',
str(codechecker_cfg['viewer_port'])
])
+ # server_cmd.extend(['--verbose', 'debug'])
psql_cfg = codechecker_cfg.get('pg_db_config')
if psql_cfg: | 1 | # -----------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -----------------------------------------------------------------------------
import json
import os
import shlex
import subprocess
import time
from subprocess import CalledProcessError, Popen, PIPE, STDOUT
from . import project
def wait_for_postgres_shutdown(workspace):
"""
Wait for PostgreSQL to shut down.
Check if postmaster.pid file exists if yes postgres is still running.
"""
max_wait_time = 60
postmaster_pid_file = os.path.join(workspace,
'pgsql_data',
'postmaster.pid')
while os.path.isfile(postmaster_pid_file):
time.sleep(1)
max_wait_time -= 1
if max_wait_time == 0:
break
def login(codechecker_cfg, test_project_path, username, password):
"""
Log in to a server
"""
print("Logging in")
login_cmd = ['CodeChecker', 'cmd', 'login',
'-u', username,
'--verbose', 'debug',
'--host', 'localhost',
'--port', str(codechecker_cfg['viewer_port'])]
auth_creds = {'client_autologin': True,
"credentials": {"*": username+":"+password}}
auth_file = os.path.join(test_project_path, ".codechecker.passwords.json")
with open(auth_file, 'w') as outfile:
json.dump(auth_creds, outfile)
try:
print(' '.join(login_cmd))
out = subprocess.call(shlex.split(' '.join(login_cmd)),
cwd=test_project_path,
env=codechecker_cfg['check_env'])
print out
return 0
except OSError as cerr:
print("Failed to call:\n" + ' '.join(cerr))
return cerr.returncode
def check(codechecker_cfg, test_project_name, test_project_path):
"""
Check a test project.
:checkers parameter should be a list of enabled or disabled checkers
Example: ['-d', 'deadcode.DeadStores']
"""
build_cmd = project.get_build_cmd(test_project_path)
check_cmd = ['CodeChecker', 'check',
'-w', codechecker_cfg['workspace'],
'-n', test_project_name,
'-b', "'" + build_cmd + "'",
'--analyzers', 'clangsa',
'--quiet-build', '--verbose', 'debug']
check_cmd.extend(['--host', 'localhost',
'--port', str(codechecker_cfg['viewer_port'])
])
suppress_file = codechecker_cfg.get('suppress_file')
if suppress_file:
check_cmd.extend(['--suppress', suppress_file])
skip_file = codechecker_cfg.get('skip_file')
if skip_file:
check_cmd.extend(['--skip', skip_file])
force = codechecker_cfg.get('force')
if force:
check_cmd.extend(['--force'])
check_cmd.extend(codechecker_cfg['checkers'])
try:
print(' '.join(check_cmd))
proc = subprocess.call(shlex.split(' '.join(check_cmd)),
cwd=test_project_path,
env=codechecker_cfg['check_env'])
return 0
except CalledProcessError as cerr:
print("Failed to call:\n" + ' '.join(cerr.cmd))
return cerr.returncode
def analyze(codechecker_cfg, test_project_name, test_project_path):
"""
Analyze a test project.
:checkers parameter should be a list of enabled or disabled checkers
Example: ['-d', 'deadcode.DeadStores']
"""
build_cmd = project.get_build_cmd(test_project_path)
build_json = os.path.join(codechecker_cfg['workspace'], "build.json")
log_cmd = ['CodeChecker', 'log',
'-o', build_json,
'-b', "'" + build_cmd + "'",
]
analyze_cmd = ['CodeChecker', 'analyze',
build_json,
'-o', codechecker_cfg['reportdir'],
'--analyzers', 'clangsa'
]
analyze_cmd.extend(codechecker_cfg['checkers'])
try:
print("LOG:")
proc = subprocess.Popen(shlex.split(' '.join(log_cmd)),
cwd=test_project_path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=codechecker_cfg['check_env'])
out, err = proc.communicate()
print(out)
print(err)
print("ANALYZE:")
print(shlex.split(' '.join(analyze_cmd)))
proc = subprocess.Popen(shlex.split(' '.join(analyze_cmd)),
cwd=test_project_path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=codechecker_cfg['check_env'])
out, err = proc.communicate()
print(out)
print(err)
return 0
except CalledProcessError as cerr:
print("Failed to call:\n" + ' '.join(cerr.cmd))
return cerr.returncode
def store(codechecker_cfg, test_project_name, report_path):
"""
Store results from a report dir.
"""
store_cmd = ['CodeChecker', 'store',
'--host', 'localhost',
'--port', str(codechecker_cfg['viewer_port']),
'--name', test_project_name,
report_path]
try:
print("STORE:")
proc = subprocess.Popen(shlex.split(' '.join(store_cmd)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=codechecker_cfg['check_env'])
out, err = proc.communicate()
print(out)
print(err)
return 0
except CalledProcessError as cerr:
print("Failed to call:\n" + ' '.join(cerr.cmd))
return cerr.returncode
def serv_cmd(codechecker_cfg, test_config):
server_cmd = ['CodeChecker', 'server',
'-w', codechecker_cfg['workspace']]
suppress_file = codechecker_cfg.get('suppress_file')
if suppress_file:
server_cmd.extend(['--suppress', suppress_file])
server_cmd.extend(['--host', 'localhost',
'--port',
str(codechecker_cfg['viewer_port'])
])
psql_cfg = codechecker_cfg.get('pg_db_config')
if psql_cfg:
server_cmd.append('--postgresql')
server_cmd += _pg_db_config_to_cmdline_params(psql_cfg)
print(server_cmd)
return server_cmd
def _pg_db_config_to_cmdline_params(pg_db_config):
"""Format postgres config dict to CodeChecker cmdline parameters."""
params = []
for key, value in pg_db_config.items():
params.append('--' + key)
params.append(str(value))
return params
| 1 | 7,496 | This can be removed too. | Ericsson-codechecker | c |
@@ -371,8 +371,11 @@ func (md *MDServerMemory) Put(ctx context.Context, rmds *RootMetadataSigned) err
// Consistency checks
if head != nil {
- err := head.MD.CheckValidSuccessorForServer(
- md.config.Crypto(), &rmds.MD)
+ id, err := md.config.Crypto().MakeMdID(&head.MD)
+ if err != nil {
+ return err
+ }
+ err = head.MD.CheckValidSuccessorForServer(id, &rmds.MD)
if err != nil {
return err
} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"bytes"
"errors"
"fmt"
"reflect"
"sync"
"time"
"github.com/keybase/client/go/logger"
keybase1 "github.com/keybase/client/go/protocol"
"golang.org/x/net/context"
)
// An mdHandleKey is an encoded BareTlfHandle.
type mdHandleKey string
type mdBlockKey struct {
tlfID TlfID
branchID BranchID
}
type mdBranchKey struct {
tlfID TlfID
deviceKID keybase1.KID
}
type mdBlockMem struct {
// An encoded RootMetdataSigned.
encodedMd []byte
timestamp time.Time
}
type mdBlockMemList struct {
initialRevision MetadataRevision
blocks []mdBlockMem
}
type mdServerMemShared struct {
// Protects all *db variables and truncateLockManager. After
// Shutdown() is called, all *db variables and
// truncateLockManager are nil.
lock sync.RWMutex
// Bare TLF handle -> TLF ID
handleDb map[mdHandleKey]TlfID
// TLF ID -> latest bare TLF handle
latestHandleDb map[TlfID]BareTlfHandle
// (TLF ID, branch ID) -> list of MDs
mdDb map[mdBlockKey]mdBlockMemList
// (TLF ID, device KID) -> branch ID
branchDb map[mdBranchKey]BranchID
truncateLockManager *mdServerLocalTruncateLockManager
updateManager *mdServerLocalUpdateManager
}
// MDServerMemory just stores metadata objects in memory.
type MDServerMemory struct {
config Config
log logger.Logger
*mdServerMemShared
}
var _ mdServerLocal = (*MDServerMemory)(nil)
// NewMDServerMemory constructs a new MDServerMemory object that stores
// all data in-memory.
func NewMDServerMemory(config Config) (*MDServerMemory, error) {
handleDb := make(map[mdHandleKey]TlfID)
latestHandleDb := make(map[TlfID]BareTlfHandle)
mdDb := make(map[mdBlockKey]mdBlockMemList)
branchDb := make(map[mdBranchKey]BranchID)
log := config.MakeLogger("")
truncateLockManager := newMDServerLocalTruncatedLockManager()
shared := mdServerMemShared{
handleDb: handleDb,
latestHandleDb: latestHandleDb,
mdDb: mdDb,
branchDb: branchDb,
truncateLockManager: &truncateLockManager,
updateManager: newMDServerLocalUpdateManager(),
}
mdserv := &MDServerMemory{config, log, &shared}
return mdserv, nil
}
var errMDServerMemoryShutdown = errors.New("MDServerMemory is shutdown")
func (md *MDServerMemory) getHandleID(ctx context.Context, handle BareTlfHandle,
mStatus MergeStatus) (tlfID TlfID, created bool, err error) {
handleBytes, err := md.config.Codec().Encode(handle)
if err != nil {
return NullTlfID, false, MDServerError{err}
}
md.lock.Lock()
defer md.lock.Unlock()
if md.handleDb == nil {
return NullTlfID, false, errMDServerDiskShutdown
}
id, ok := md.handleDb[mdHandleKey(handleBytes)]
if ok {
return id, false, nil
}
// Non-readers shouldn't be able to create the dir.
_, uid, err := md.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return NullTlfID, false, MDServerError{err}
}
if !handle.IsReader(uid) {
return NullTlfID, false, MDServerErrorUnauthorized{}
}
// Allocate a new random ID.
id, err = md.config.Crypto().MakeRandomTlfID(handle.IsPublic())
if err != nil {
return NullTlfID, false, MDServerError{err}
}
md.handleDb[mdHandleKey(handleBytes)] = id
md.latestHandleDb[id] = handle
return id, true, nil
}
// GetForHandle implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) GetForHandle(ctx context.Context, handle BareTlfHandle,
mStatus MergeStatus) (TlfID, *RootMetadataSigned, error) {
id, created, err := md.getHandleID(ctx, handle, mStatus)
if err != nil {
return NullTlfID, nil, err
}
if created {
return id, nil, nil
}
rmds, err := md.GetForTLF(ctx, id, NullBranchID, mStatus)
if err != nil {
return NullTlfID, nil, err
}
return id, rmds, nil
}
func (md *MDServerMemory) checkGetParams(
ctx context.Context, id TlfID, bid BranchID, mStatus MergeStatus) (
newBid BranchID, err error) {
if mStatus == Merged && bid != NullBranchID {
return NullBranchID, MDServerErrorBadRequest{Reason: "Invalid branch ID"}
}
// Check permissions
mergedMasterHead, err :=
md.getHeadForTLF(ctx, id, NullBranchID, Merged)
if err != nil {
return NullBranchID, MDServerError{err}
}
_, currentUID, err := md.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return NullBranchID, MDServerError{err}
}
ok, err := isReader(currentUID, mergedMasterHead)
if err != nil {
return NullBranchID, MDServerError{err}
}
if !ok {
return NullBranchID, MDServerErrorUnauthorized{}
}
// Lookup the branch ID if not supplied
if mStatus == Unmerged && bid == NullBranchID {
return md.getBranchID(ctx, id)
}
return bid, nil
}
// GetForTLF implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) GetForTLF(ctx context.Context, id TlfID,
bid BranchID, mStatus MergeStatus) (*RootMetadataSigned, error) {
bid, err := md.checkGetParams(ctx, id, bid, mStatus)
if err != nil {
return nil, err
}
if mStatus == Unmerged && bid == NullBranchID {
return nil, nil
}
rmds, err := md.getHeadForTLF(ctx, id, bid, mStatus)
if err != nil {
return nil, MDServerError{err}
}
return rmds, nil
}
func (md *MDServerMemory) getHeadForTLF(ctx context.Context, id TlfID,
bid BranchID, mStatus MergeStatus) (*RootMetadataSigned, error) {
key, err := md.getMDKey(id, bid, mStatus)
if err != nil {
return nil, err
}
md.lock.Lock()
defer md.lock.Unlock()
if md.mdDb == nil {
return nil, errMDServerMemoryShutdown
}
blockList, ok := md.mdDb[key]
if !ok {
return nil, nil
}
blocks := blockList.blocks
var rmds RootMetadataSigned
err = md.config.Codec().Decode(blocks[len(blocks)-1].encodedMd, &rmds)
if err != nil {
return nil, err
}
return &rmds, nil
}
func (md *MDServerMemory) getMDKey(
id TlfID, bid BranchID, mStatus MergeStatus) (mdBlockKey, error) {
if (mStatus == Merged) != (bid == NullBranchID) {
return mdBlockKey{},
fmt.Errorf("mstatus=%v is inconsistent with bid=%v",
mStatus, bid)
}
return mdBlockKey{id, bid}, nil
}
func (md *MDServerMemory) getBranchKey(ctx context.Context, id TlfID) (
mdBranchKey, error) {
// add device KID
deviceKID, err := md.getCurrentDeviceKID(ctx)
if err != nil {
return mdBranchKey{}, err
}
return mdBranchKey{id, deviceKID}, nil
}
func (md *MDServerMemory) getCurrentDeviceKID(ctx context.Context) (keybase1.KID, error) {
key, err := md.config.KBPKI().GetCurrentCryptPublicKey(ctx)
if err != nil {
return keybase1.KID(""), err
}
return key.kid, nil
}
// GetRange implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) GetRange(ctx context.Context, id TlfID,
bid BranchID, mStatus MergeStatus, start, stop MetadataRevision) (
[]*RootMetadataSigned, error) {
md.log.CDebugf(ctx, "GetRange %d %d (%s)", start, stop, mStatus)
bid, err := md.checkGetParams(ctx, id, bid, mStatus)
if err != nil {
return nil, err
}
if mStatus == Unmerged && bid == NullBranchID {
return nil, nil
}
key, err := md.getMDKey(id, bid, mStatus)
if err != nil {
return nil, MDServerError{err}
}
md.lock.Lock()
defer md.lock.Unlock()
if md.mdDb == nil {
return nil, errMDServerMemoryShutdown
}
blockList, ok := md.mdDb[key]
if !ok {
return nil, nil
}
startI := int(start - blockList.initialRevision)
if startI < 0 {
startI = 0
}
endI := int(stop - blockList.initialRevision + 1)
blocks := blockList.blocks
if endI > len(blocks) {
endI = len(blocks)
}
var rmdses []*RootMetadataSigned
for i := startI; i < endI; i++ {
var rmds RootMetadataSigned
err = md.config.Codec().Decode(blocks[i].encodedMd, &rmds)
if err != nil {
return nil, MDServerError{err}
}
expectedRevision := blockList.initialRevision + MetadataRevision(i)
if expectedRevision != rmds.MD.Revision {
panic(fmt.Errorf("expected revision %v, got %v",
expectedRevision, rmds.MD.Revision))
}
rmdses = append(rmdses, &rmds)
}
return rmdses, nil
}
// Put implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) Put(ctx context.Context, rmds *RootMetadataSigned) error {
mStatus := rmds.MD.MergedStatus()
bid := rmds.MD.BID
if (mStatus == Merged) != (bid == NullBranchID) {
return MDServerErrorBadRequest{Reason: "Invalid branch ID"}
}
id := rmds.MD.ID
// Check permissions
_, currentUID, err := md.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return MDServerError{err}
}
mergedMasterHead, err :=
md.getHeadForTLF(ctx, id, NullBranchID, Merged)
if err != nil {
return MDServerError{err}
}
ok, err := isWriterOrValidRekey(
md.config.Codec(), currentUID, mergedMasterHead, rmds)
if err != nil {
return MDServerError{err}
}
if !ok {
return MDServerErrorUnauthorized{}
}
head, err := md.getHeadForTLF(ctx, id, bid, mStatus)
if err != nil {
return MDServerError{err}
}
var recordBranchID bool
if mStatus == Unmerged && head == nil {
// currHead for unmerged history might be on the main branch
prevRev := rmds.MD.Revision - 1
rmdses, err := md.GetRange(ctx, id, NullBranchID, Merged, prevRev, prevRev)
if err != nil {
return MDServerError{err}
}
if len(rmdses) != 1 {
return MDServerError{
Err: fmt.Errorf("Expected 1 MD block got %d", len(rmdses)),
}
}
head = rmdses[0]
recordBranchID = true
}
// Consistency checks
if head != nil {
err := head.MD.CheckValidSuccessorForServer(
md.config.Crypto(), &rmds.MD)
if err != nil {
return err
}
}
// Record branch ID
if recordBranchID {
branchKey, err := md.getBranchKey(ctx, id)
if err != nil {
return MDServerError{err}
}
err = func() error {
md.lock.Lock()
defer md.lock.Unlock()
if md.branchDb == nil {
return errMDServerMemoryShutdown
}
md.branchDb[branchKey] = bid
return nil
}()
if err != nil {
return err
}
}
encodedMd, err := md.config.Codec().Encode(rmds)
if err != nil {
return MDServerError{err}
}
block := mdBlockMem{encodedMd, md.config.Clock().Now()}
// Add an entry with the revision key.
revKey, err := md.getMDKey(id, bid, mStatus)
if err != nil {
return MDServerError{err}
}
md.lock.Lock()
defer md.lock.Unlock()
if md.mdDb == nil {
return errMDServerMemoryShutdown
}
blockList, ok := md.mdDb[revKey]
if ok {
blockList.blocks = append(blockList.blocks, block)
md.mdDb[revKey] = blockList
} else {
md.mdDb[revKey] = mdBlockMemList{
initialRevision: rmds.MD.Revision,
blocks: []mdBlockMem{block},
}
}
if mStatus == Merged &&
// Don't send notifies if it's just a rekey (the real mdserver
// sends a "folder needs rekey" notification in this case).
!(rmds.MD.IsRekeySet() && rmds.MD.IsWriterMetadataCopiedSet()) {
md.updateManager.setHead(id, md)
}
return nil
}
// PruneBranch implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) PruneBranch(ctx context.Context, id TlfID, bid BranchID) error {
if bid == NullBranchID {
return MDServerErrorBadRequest{Reason: "Invalid branch ID"}
}
currBID, err := md.getBranchID(ctx, id)
if err != nil {
return err
}
if currBID == NullBranchID || bid != currBID {
return MDServerErrorBadRequest{Reason: "Invalid branch ID"}
}
// Don't actually delete unmerged history. This is intentional to be consistent
// with the mdserver behavior-- it garbage collects discarded branches in the
// background.
branchKey, err := md.getBranchKey(ctx, id)
if err != nil {
return MDServerError{err}
}
md.lock.Lock()
defer md.lock.Unlock()
if md.mdDb == nil {
return errMDServerMemoryShutdown
}
delete(md.branchDb, branchKey)
return nil
}
func (md *MDServerMemory) getBranchID(ctx context.Context, id TlfID) (BranchID, error) {
branchKey, err := md.getBranchKey(ctx, id)
if err != nil {
return NullBranchID, MDServerError{err}
}
md.lock.Lock()
defer md.lock.Unlock()
if md.branchDb == nil {
return NullBranchID, errMDServerMemoryShutdown
}
bid, ok := md.branchDb[branchKey]
if !ok {
return NullBranchID, nil
}
return bid, nil
}
// RegisterForUpdate implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) RegisterForUpdate(ctx context.Context, id TlfID,
currHead MetadataRevision) (<-chan error, error) {
// are we already past this revision? If so, fire observer
// immediately
currMergedHeadRev, err := md.getCurrentMergedHeadRevision(ctx, id)
if err != nil {
return nil, err
}
c := md.updateManager.registerForUpdate(id, currHead, currMergedHeadRev, md)
return c, nil
}
func (md *MDServerMemory) getCurrentDeviceKIDBytes(ctx context.Context) (
[]byte, error) {
buf := &bytes.Buffer{}
deviceKID, err := md.getCurrentDeviceKID(ctx)
if err != nil {
return []byte{}, err
}
_, err = buf.Write(deviceKID.ToBytes())
if err != nil {
return []byte{}, err
}
return buf.Bytes(), nil
}
// TruncateLock implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) TruncateLock(ctx context.Context, id TlfID) (
bool, error) {
md.lock.Lock()
defer md.lock.Unlock()
if md.truncateLockManager == nil {
return false, errMDServerMemoryShutdown
}
myKID, err := md.getCurrentDeviceKID(ctx)
if err != nil {
return false, err
}
return md.truncateLockManager.truncateLock(myKID, id)
}
// TruncateUnlock implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) TruncateUnlock(ctx context.Context, id TlfID) (
bool, error) {
md.lock.Lock()
defer md.lock.Unlock()
if md.truncateLockManager == nil {
return false, errMDServerMemoryShutdown
}
myKID, err := md.getCurrentDeviceKID(ctx)
if err != nil {
return false, err
}
return md.truncateLockManager.truncateUnlock(myKID, id)
}
// Shutdown implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) Shutdown() {
md.lock.Lock()
defer md.lock.Unlock()
md.handleDb = nil
md.latestHandleDb = nil
md.branchDb = nil
md.truncateLockManager = nil
}
// IsConnected implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) IsConnected() bool {
return !md.isShutdown()
}
// RefreshAuthToken implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) RefreshAuthToken(ctx context.Context) {}
// This should only be used for testing with an in-memory server.
func (md *MDServerMemory) copy(config Config) mdServerLocal {
// NOTE: observers and sessionHeads are copied shallowly on
// purpose, so that the MD server that gets a Put will notify all
// observers correctly no matter where they got on the list.
log := config.MakeLogger("")
return &MDServerMemory{config, log, md.mdServerMemShared}
}
// isShutdown returns whether the logical, shared MDServer instance
// has been shut down.
func (md *MDServerMemory) isShutdown() bool {
md.lock.RLock()
defer md.lock.RUnlock()
return md.handleDb == nil
}
// DisableRekeyUpdatesForTesting implements the MDServer interface.
func (md *MDServerMemory) DisableRekeyUpdatesForTesting() {
// Nothing to do.
}
// CheckForRekeys implements the MDServer interface.
func (md *MDServerMemory) CheckForRekeys(ctx context.Context) <-chan error {
// Nothing to do
c := make(chan error, 1)
c <- nil
return c
}
func (md *MDServerMemory) addNewAssertionForTest(uid keybase1.UID,
newAssertion keybase1.SocialAssertion) error {
md.lock.Lock()
defer md.lock.Unlock()
if md.handleDb == nil {
return errMDServerMemoryShutdown
}
// Iterate through all the handles, and add handles for ones
// containing newAssertion to now include the uid.
for hBytes, id := range md.handleDb {
var h BareTlfHandle
err := md.config.Codec().Decode([]byte(hBytes), &h)
if err != nil {
return err
}
assertions := map[keybase1.SocialAssertion]keybase1.UID{
newAssertion: uid,
}
newH := h.ResolveAssertions(assertions)
if reflect.DeepEqual(h, newH) {
continue
}
newHBytes, err := md.config.Codec().Encode(newH)
if err != nil {
return err
}
md.handleDb[mdHandleKey(newHBytes)] = id
}
return nil
}
func (md *MDServerMemory) getCurrentMergedHeadRevision(
ctx context.Context, id TlfID) (rev MetadataRevision, err error) {
head, err := md.GetForTLF(ctx, id, NullBranchID, Merged)
if err != nil {
return 0, err
}
if head != nil {
rev = head.MD.Revision
}
return
}
// GetLatestHandleForTLF implements the MDServer interface for MDServerMemory.
func (md *MDServerMemory) GetLatestHandleForTLF(_ context.Context, id TlfID) (
BareTlfHandle, error) {
md.lock.RLock()
defer md.lock.RUnlock()
if md.latestHandleDb == nil {
return BareTlfHandle{}, errMDServerMemoryShutdown
}
return md.latestHandleDb[id], nil
}
| 1 | 12,078 | I'm a little worried about the new extra calls to `MakeMdId`, since in most local-server KBFS profiles, hashing is the biggest CPU user, and not re-using the value calculated by the client will result in even more hashing. That said, I understand why you did it this way, and I don't have a concrete suggestion on how to make it better. Just something to keep in mind; plus it might be worth comparing the test speed before and after this change to make sure it didn't slow things down too much. | keybase-kbfs | go |
@@ -29,9 +29,12 @@ import PreviewTable from 'GoogleComponents/preview-table';
*/
import { isDataZeroForReporting, getTopPagesReportDataDefaults } from '../util';
-const { __ } = wp.i18n;
-const { map } = lodash;
-const { Component } = wp.element;
+/**
+ * WordPress dependencies
+ */
+import { __ } from '@wordpress/i18n';
+import { map } from 'lodash';
+import { Component } from '@wordpress/element';
class WPAnalyticsDashboardWidgetTopPagesTable extends Component {
render() { | 1 | /**
* WPAnalyticsDashboardWidgetTopPagesTable component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import withData from 'GoogleComponents/higherorder/withdata';
import { TYPE_MODULES } from 'GoogleComponents/data';
import { getTimeInSeconds, numberFormat } from 'GoogleUtil';
import { getDataTableFromData, TableOverflowContainer } from 'GoogleComponents/data-table';
import PreviewTable from 'GoogleComponents/preview-table';
/**
* Internal dependencies
*/
import { isDataZeroForReporting, getTopPagesReportDataDefaults } from '../util';
const { __ } = wp.i18n;
const { map } = lodash;
const { Component } = wp.element;
class WPAnalyticsDashboardWidgetTopPagesTable extends Component {
render() {
const { data } = this.props;
const { siteURL: siteURL } = googlesitekit.admin;
if ( isDataZeroForReporting( data ) ) {
return null;
}
const links = [];
const dataMapped = map( data[ 0 ].data.rows, ( row, i ) => {
const [ title, url ] = row.dimensions;
links[ i ] = siteURL + url;
return [
title,
numberFormat( row.metrics[ 0 ].values[ 0 ] ),
];
} );
const options = {
hideHeader: true,
chartsEnabled: true,
links,
cap: 5,
showURLs: true,
};
const dataTable = getDataTableFromData( dataMapped, [], options );
return (
<div className="googlesitekit-search-console-widget">
<h2 className="googlesitekit-search-console-widget__title">
{ __( 'Top content over the last 28 days', 'google-site-kit' ) }
</h2>
<TableOverflowContainer>
{ dataTable }
</TableOverflowContainer>
</div>
);
}
}
export default withData(
WPAnalyticsDashboardWidgetTopPagesTable,
[
{
type: TYPE_MODULES,
identifier: 'analytics',
datapoint: 'report',
data: getTopPagesReportDataDefaults(),
priority: 1,
maxAge: getTimeInSeconds( 'day' ),
context: 'WPDashboard',
},
],
<PreviewTable rows={ 6 } />
);
| 1 | 24,752 | `lodash` shouldn't be grouped under WordPress dependencies | google-site-kit-wp | js |
@@ -1,6 +1,7 @@
+# -*- coding: UTF-8 -*-
#appModules/miranda32.py
#A part of NonVisual Desktop Access (NVDA)
-#Copyright (C) 2006-2012 NVDA Contributors
+#Copyright (C) 2006-2019 NV Access Limited, Aleksey Sadovoy, Peter Vágner, Joseph Lee, Bill Dengler
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
| 1 | #appModules/miranda32.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2012 NVDA Contributors
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import ui
import config
from ctypes import *
from ctypes.wintypes import *
import winKernel
from NVDAObjects.IAccessible import IAccessible, ContentGenericClient
from NVDAObjects.behaviors import Dialog
import appModuleHandler
import speech
import braille
import controlTypes
from scriptHandler import isScriptWaiting
import api
import mouseHandler
import oleacc
from keyboardHandler import KeyboardInputGesture
import watchdog
#contact list window messages
CLM_FIRST=0x1000 #this is the same as LVM_FIRST
CLM_LAST=0x1100
#messages, compare with equivalent TVM_s in the MSDN
CLM_ENSUREVISIBLE=CLM_FIRST+6 #wParam=hItem, lParam=partialOk
CLE_TOGGLE=-1
CLE_COLLAPSE=0
CLE_EXPAND=1
CLE_INVALID=0xFFFF
CLM_EXPAND=CLM_FIRST+7 #wParam=hItem, lParam=CLE_
CLM_FINDCONTACT=CLM_FIRST+8 #wParam=hContact, returns an hItem
CLM_FINDGROUP=CLM_FIRST+9 #wParam=hGroup, returns an hItem
CLM_GETBKCOLOR=CLM_FIRST+10 #returns a COLORREF
CLM_GETCHECKMARK=CLM_FIRST+11 #wParam=hItem, returns 1 or 0
CLM_GETCOUNT=CLM_FIRST+12 #returns the total number of items
CLM_GETEXPAND=CLM_FIRST+14 #wParam=hItem, returns a CLE_, CLE_INVALID if not a group
CLM_GETEXTRACOLUMNS=CLM_FIRST+15 #returns number of extra columns
CLM_GETEXTRAIMAGE=CLM_FIRST+16 #wParam=hItem, lParam=MAKELPARAM(iColumn (0 based),0), returns iImage or 0xFF
CLM_GETEXTRAIMAGELIST=CLM_FIRST+17 #returns HIMAGELIST
CLM_GETFONT=CLM_FIRST+18 #wParam=fontId, see clm_setfont. returns hFont.
CLM_GETINDENT=CLM_FIRST+19 #wParam=new group indent
CLM_GETISEARCHSTRING=CLM_FIRST+20 #lParam=(char*)pszStr, max 120 bytes, returns number of chars in string
MAXITEMTEXTLEN=120
CLM_GETITEMTEXT=CLM_FIRST+21 #wParam=hItem, lParam=(char*)pszStr, max 120 bytes
CLM_GETSELECTION=CLM_FIRST+23 #returns hItem
CLM_SELECTITEM=CLM_FIRST+26 #wParam=hItem
CLM_GETHIDEOFFLINEROOT=CLM_FIRST+40 #returns TRUE/FALSE
CLM_GETEXSTYLE=CLM_FIRST+44 #returns CLS_EX_ flags
CLM_GETLEFTMARGIN=CLM_FIRST+46 #returns count of pixels
CLCIT_INVALID=-1
CLCIT_GROUP=0
CLCIT_CONTACT=1
CLCIT_DIVIDER=2
CLCIT_INFO=3
CLM_GETITEMTYPE=CLM_FIRST+49 #wParam=hItem, returns a CLCIT_
CLGN_ROOT=0
CLGN_CHILD=1
CLGN_PARENT=2
CLGN_NEXT=3
CLGN_PREVIOUS=4
CLGN_NEXTCONTACT=5
CLGN_PREVIOUSCONTACT=6
CLGN_NEXTGROUP=7
CLGN_PREVIOUSGROUP=8
CLM_GETNEXTITEM=CLM_FIRST+50 #wParam=flag, lParam=hItem, returns an hItem
CLM_GETTEXTCOLOR=CLM_FIRST+51 #wParam=FONTID_, returns COLORREF
MAXSTATUSMSGLEN=256
CLM_GETSTATUSMSG=CLM_FIRST+105
#other constants
ANSILOGS=(1001,1006)
MESSAGEVIEWERS=(1001,1005,5005)
class AppModule(appModuleHandler.AppModule):
lastTextLengths={}
lastMessages=[]
# Must not be > 9.
MessageHistoryLength=3
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
if obj.role == controlTypes.ROLE_WINDOW:
return
windowClass = obj.windowClassName
if windowClass == "CListControl":
try:
clsList.remove(ContentGenericClient)
except ValueError:
pass
clsList.insert(0, mirandaIMContactList)
elif windowClass in ("MButtonClass", "TSButtonClass", "CLCButtonClass"):
clsList.insert(0, mirandaIMButton)
elif windowClass == "Hyperlink":
clsList.insert(0, mirandaIMHyperlink)
elif isinstance(obj, IAccessible) and obj.IAccessibleRole == oleacc.ROLE_SYSTEM_PROPERTYPAGE:
clsList.insert(0, MPropertyPage)
elif isinstance(obj, IAccessible) and obj.IAccessibleRole == oleacc.ROLE_SYSTEM_SCROLLBAR and obj.windowControlID in MESSAGEVIEWERS:
clsList.insert(0, MirandaMessageViewerScrollbar)
elif windowClass == "ListBox" and obj.windowControlID == 0:
clsList.insert(0, DuplicateFocusListBox)
def event_NVDAObject_init(self,obj):
if obj.windowClassName=="ColourPicker":
obj.role=controlTypes.ROLE_COLORCHOOSER
elif (obj.windowControlID in ANSILOGS) and (obj.windowClassName=="RichEdit20A"):
obj._isWindowUnicode=False
def script_readMessage(self,gesture):
num=int(gesture.mainKeyName[-1])
if len(self.lastMessages)>num-1:
ui.message(self.lastMessages[num-1])
else:
# Translators: This is presented to inform the user that no instant message has been received.
ui.message(_("No message yet"))
# Translators: The description of an NVDA command to view one of the recent messages.
script_readMessage.__doc__=_("Displays one of the recent messages")
def __init__(self, *args, **kwargs):
super(AppModule, self).__init__(*args, **kwargs)
for n in xrange(1, self.MessageHistoryLength + 1):
self.bindGesture("kb:NVDA+control+%s" % n, "readMessage")
class mirandaIMContactList(IAccessible):
def _get_name(self):
hItem=watchdog.cancellableSendMessage(self.windowHandle,CLM_GETSELECTION,0,0)
internalBuf=winKernel.virtualAllocEx(self.processHandle,None,MAXITEMTEXTLEN,winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)
try:
watchdog.cancellableSendMessage(self.windowHandle,CLM_GETITEMTEXT,hItem,internalBuf)
buf=create_unicode_buffer(MAXITEMTEXTLEN)
winKernel.readProcessMemory(self.processHandle,internalBuf,buf,MAXITEMTEXTLEN,None)
text=buf.value
statusMsgPtr=watchdog.cancellableSendMessage(self.windowHandle,CLM_GETSTATUSMSG,hItem,0)
if statusMsgPtr>0:
buf2=create_unicode_buffer(MAXSTATUSMSGLEN)
winKernel.readProcessMemory(self.processHandle,statusMsgPtr,buf2,MAXSTATUSMSGLEN,None)
text="%s %s"%(text,buf2.value)
finally:
winKernel.virtualFreeEx(self.processHandle,internalBuf,0,winKernel.MEM_RELEASE)
return text
def _get_role(self):
hItem=watchdog.cancellableSendMessage(self.windowHandle,CLM_GETSELECTION,0,0)
iType=watchdog.cancellableSendMessage(self.windowHandle,CLM_GETITEMTYPE,hItem,0)
if iType==CLCIT_DIVIDER or iType==CLCIT_INVALID: #some clists treat invalid as divider
return controlTypes.ROLE_SEPARATOR
else:
return controlTypes.ROLE_TREEVIEWITEM
def _get_states(self):
newStates=super(mirandaIMContactList,self)._get_states()
hItem=watchdog.cancellableSendMessage(self.windowHandle,CLM_GETSELECTION,0,0)
state=watchdog.cancellableSendMessage(self.windowHandle,CLM_GETEXPAND,hItem,0)
if state==CLE_EXPAND:
newStates.add(controlTypes.STATE_EXPANDED)
elif state==CLE_COLLAPSE:
newStates.add(controlTypes.STATE_COLLAPSED)
return newStates
def script_changeItem(self,gesture):
gesture.send()
if not isScriptWaiting():
api.processPendingEvents()
speech.speakObject(self,reason=controlTypes.REASON_FOCUS)
braille.handler.handleGainFocus(self)
__changeItemGestures = (
"kb:downArrow",
"kb:upArrow",
"kb:leftArrow",
"kb:rightArrow",
"kb:home",
"kb:end",
"kb:pageUp",
"kb:pageDown",
)
def initOverlayClass(self):
for gesture in self.__changeItemGestures:
self.bindGesture(gesture, "changeItem")
class mirandaIMButton(IAccessible):
def _get_name(self):
api.moveMouseToNVDAObject(self)
return super(mirandaIMButton,self)._get_name()
def _get_role(self):
return controlTypes.ROLE_BUTTON
def getActionName(self):
if controlTypes.STATE_FOCUSED not in self.states:
return
return "Click"
def doAction(self):
if controlTypes.STATE_FOCUSED not in self.states:
return
KeyboardInputGesture.fromName("space").send()
def script_doDefaultAction(self,gesture):
self.doAction()
def initOverlayClass(self):
self.bindGesture("kb:enter", "doDefaultAction")
class mirandaIMHyperlink(mirandaIMButton):
def _get_role(self):
return controlTypes.ROLE_LINK
class MPropertyPage(Dialog,IAccessible):
def _get_name(self):
name=super(MPropertyPage,self)._get_name()
if not name:
try:
tc=self.parent.next.firstChild
except AttributeError:
tc=None
if tc and tc.role==controlTypes.ROLE_TABCONTROL:
children=tc.children
for index in xrange(len(children)):
if (children[index].role==controlTypes.ROLE_TAB) and (controlTypes.STATE_SELECTED in children[index].states):
name=children[index].name
break
return name
class MirandaMessageViewerScrollbar(IAccessible):
def event_valueChange(self):
curTextLength=len(self.windowText)
if self.windowHandle not in self.appModule.lastTextLengths:
self.appModule.lastTextLengths[self.windowHandle]=curTextLength
elif self.appModule.lastTextLengths[self.windowHandle]<curTextLength:
message=self.windowText[self.appModule.lastTextLengths[self.windowHandle]:]
self.appModule.lastMessages.insert(0,message)
self.appModule.lastMessages=self.appModule.lastMessages[:self.appModule.MessageHistoryLength]
if config.conf["presentation"]["reportDynamicContentChanges"]:
ui.message(message)
self.appModule.lastTextLengths[self.windowHandle]=curTextLength
super(MirandaMessageViewerScrollbar,self).event_valueChange()
class DuplicateFocusListBox(IAccessible):
"""A list box which annoyingly fires focus events every second, even when a menu is open.
"""
def _get_shouldAllowIAccessibleFocusEvent(self):
# Stop annoying duplicate focus events, which are fired even if a menu is open.
focus = api.getFocusObject()
focusRole = focus.role
focusStates = focus.states
if (self == focus or
(focusRole == controlTypes.ROLE_MENUITEM and controlTypes.STATE_FOCUSED in focusStates) or
(focusRole == controlTypes.ROLE_POPUPMENU and controlTypes.STATE_INVISIBLE not in focusStates)
):
return False
return super(DuplicateFocusListBox, self).shouldAllowIAccessibleFocusEvent
| 1 | 24,817 | Just curious, but how did you work out that Aleksey Sadovoy, Peter Vgner and Joseph Lee contributed to this file? Did you look at the repository history? | nvaccess-nvda | py |
@@ -264,6 +264,19 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal
_post.Reference();
_post.Dispose();
+ // We need this walk because we call ReadStop on 2 places:
+ // 1. On the dispatch pipe UvPipeHandle after reading the correct message
+ // 2. On on accepted connections when there's back pressure
+ // Calling ReadStop makes the handle as in-active which means the loop can
+ // end while there's still valid handles around. This makes loop.Dispose throw
+ // with an EBUSY. To avoid that, we walk all of the handles and dispose them.
+ Walk(ptr =>
+ {
+ var handle = UvMemory.FromIntPtr<UvHandle>(ptr);
+ // handle can be null because UvMemory.FromIntPtr looks up a weak reference
+ handle?.Dispose();
+ });
+
// Ensure the Dispose operations complete in the event loop.
_loop.Run();
| 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.ExceptionServices;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Server.Kestrel.Internal.System.IO.Pipelines;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal.Networking;
using Microsoft.Extensions.Logging;
namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal
{
public class LibuvThread : IScheduler
{
// maximum times the work queues swapped and are processed in a single pass
// as completing a task may immediately have write data to put on the network
// otherwise it needs to wait till the next pass of the libuv loop
private readonly int _maxLoops = 8;
private readonly LibuvTransport _transport;
private readonly IApplicationLifetime _appLifetime;
private readonly Thread _thread;
private readonly TaskCompletionSource<object> _threadTcs = new TaskCompletionSource<object>(TaskCreationOptions.RunContinuationsAsynchronously);
private readonly UvLoopHandle _loop;
private readonly UvAsyncHandle _post;
private Queue<Work> _workAdding = new Queue<Work>(1024);
private Queue<Work> _workRunning = new Queue<Work>(1024);
private Queue<CloseHandle> _closeHandleAdding = new Queue<CloseHandle>(256);
private Queue<CloseHandle> _closeHandleRunning = new Queue<CloseHandle>(256);
private readonly object _workSync = new object();
private readonly object _startSync = new object();
private bool _stopImmediate = false;
private bool _initCompleted = false;
private ExceptionDispatchInfo _closeError;
private readonly ILibuvTrace _log;
public LibuvThread(LibuvTransport transport)
{
_transport = transport;
_appLifetime = transport.AppLifetime;
_log = transport.Log;
_loop = new UvLoopHandle(_log);
_post = new UvAsyncHandle(_log);
_thread = new Thread(ThreadStart);
_thread.Name = nameof(LibuvThread);
#if !DEBUG
// Mark the thread as being as unimportant to keeping the process alive.
// Don't do this for debug builds, so we know if the thread isn't terminating.
_thread.IsBackground = true;
#endif
QueueCloseHandle = PostCloseHandle;
QueueCloseAsyncHandle = EnqueueCloseHandle;
PipeFactory = new PipeFactory();
WriteReqPool = new WriteReqPool(this, _log);
}
// For testing
public LibuvThread(LibuvTransport transport, int maxLoops)
: this(transport)
{
_maxLoops = maxLoops;
}
public UvLoopHandle Loop { get { return _loop; } }
public PipeFactory PipeFactory { get; }
public WriteReqPool WriteReqPool { get; }
#if DEBUG
public List<WeakReference> Requests { get; } = new List<WeakReference>();
#endif
public ExceptionDispatchInfo FatalError { get { return _closeError; } }
public Action<Action<IntPtr>, IntPtr> QueueCloseHandle { get; }
private Action<Action<IntPtr>, IntPtr> QueueCloseAsyncHandle { get; }
public Task StartAsync()
{
var tcs = new TaskCompletionSource<int>(TaskCreationOptions.RunContinuationsAsynchronously);
_thread.Start(tcs);
return tcs.Task;
}
public async Task StopAsync(TimeSpan timeout)
{
lock (_startSync)
{
if (!_initCompleted)
{
return;
}
}
Debug.Assert(!_threadTcs.Task.IsCompleted, "The loop thread was completed before calling uv_unref on the post handle.");
var stepTimeout = TimeSpan.FromTicks(timeout.Ticks / 3);
try
{
Post(t => t.AllowStop());
if (!await WaitAsync(_threadTcs.Task, stepTimeout).ConfigureAwait(false))
{
Post(t => t.OnStopRude());
if (!await WaitAsync(_threadTcs.Task, stepTimeout).ConfigureAwait(false))
{
Post(t => t.OnStopImmediate());
if (!await WaitAsync(_threadTcs.Task, stepTimeout).ConfigureAwait(false))
{
_log.LogCritical($"{nameof(LibuvThread)}.{nameof(StopAsync)} failed to terminate libuv thread.");
}
}
}
}
catch (ObjectDisposedException)
{
if (!await WaitAsync(_threadTcs.Task, stepTimeout).ConfigureAwait(false))
{
_log.LogCritical($"{nameof(LibuvThread)}.{nameof(StopAsync)} failed to terminate libuv thread.");
}
}
_closeError?.Throw();
}
#if DEBUG
private void CheckUvReqLeaks()
{
GC.Collect();
GC.WaitForPendingFinalizers();
GC.Collect();
// Detect leaks in UvRequest objects
foreach (var request in Requests)
{
Debug.Assert(request.Target == null, $"{request.Target?.GetType()} object is still alive.");
}
}
#endif
private void AllowStop()
{
_post.Unreference();
}
private void OnStopRude()
{
Walk(ptr =>
{
var handle = UvMemory.FromIntPtr<UvHandle>(ptr);
if (handle != _post)
{
// handle can be null because UvMemory.FromIntPtr looks up a weak reference
handle?.Dispose();
}
});
}
private void OnStopImmediate()
{
_stopImmediate = true;
_loop.Stop();
}
public void Post<T>(Action<T> callback, T state)
{
lock (_workSync)
{
_workAdding.Enqueue(new Work
{
CallbackAdapter = CallbackAdapter<T>.PostCallbackAdapter,
Callback = callback,
State = state
});
}
_post.Send();
}
private void Post(Action<LibuvThread> callback)
{
Post(callback, this);
}
public Task PostAsync<T>(Action<T> callback, T state)
{
var tcs = new TaskCompletionSource<object>(TaskCreationOptions.RunContinuationsAsynchronously);
lock (_workSync)
{
_workAdding.Enqueue(new Work
{
CallbackAdapter = CallbackAdapter<T>.PostAsyncCallbackAdapter,
Callback = callback,
State = state,
Completion = tcs
});
}
_post.Send();
return tcs.Task;
}
public void Walk(Action<IntPtr> callback)
{
Walk((ptr, arg) => callback(ptr), IntPtr.Zero);
}
private void Walk(LibuvFunctions.uv_walk_cb callback, IntPtr arg)
{
_transport.Libuv.walk(
_loop,
callback,
arg
);
}
private void PostCloseHandle(Action<IntPtr> callback, IntPtr handle)
{
EnqueueCloseHandle(callback, handle);
_post.Send();
}
private void EnqueueCloseHandle(Action<IntPtr> callback, IntPtr handle)
{
lock (_workSync)
{
_closeHandleAdding.Enqueue(new CloseHandle { Callback = callback, Handle = handle });
}
}
private void ThreadStart(object parameter)
{
lock (_startSync)
{
var tcs = (TaskCompletionSource<int>)parameter;
try
{
_loop.Init(_transport.Libuv);
_post.Init(_loop, OnPost, EnqueueCloseHandle);
_initCompleted = true;
tcs.SetResult(0);
}
catch (Exception ex)
{
tcs.SetException(ex);
return;
}
}
try
{
_loop.Run();
if (_stopImmediate)
{
// thread-abort form of exit, resources will be leaked
return;
}
// run the loop one more time to delete the open handles
_post.Reference();
_post.Dispose();
// Ensure the Dispose operations complete in the event loop.
_loop.Run();
_loop.Dispose();
}
catch (Exception ex)
{
_closeError = ExceptionDispatchInfo.Capture(ex);
// Request shutdown so we can rethrow this exception
// in Stop which should be observable.
_appLifetime.StopApplication();
}
finally
{
PipeFactory.Dispose();
WriteReqPool.Dispose();
_threadTcs.SetResult(null);
#if DEBUG
// Check for handle leaks after disposing everything
CheckUvReqLeaks();
#endif
}
}
private void OnPost()
{
var loopsRemaining = _maxLoops;
bool wasWork;
do
{
wasWork = DoPostWork();
wasWork = DoPostCloseHandle() || wasWork;
loopsRemaining--;
} while (wasWork && loopsRemaining > 0);
}
private bool DoPostWork()
{
Queue<Work> queue;
lock (_workSync)
{
queue = _workAdding;
_workAdding = _workRunning;
_workRunning = queue;
}
bool wasWork = queue.Count > 0;
while (queue.Count != 0)
{
var work = queue.Dequeue();
try
{
work.CallbackAdapter(work.Callback, work.State);
work.Completion?.TrySetResult(null);
}
catch (Exception ex)
{
if (work.Completion != null)
{
work.Completion.TrySetException(ex);
}
else
{
_log.LogError(0, ex, $"{nameof(LibuvThread)}.{nameof(DoPostWork)}");
throw;
}
}
}
return wasWork;
}
private bool DoPostCloseHandle()
{
Queue<CloseHandle> queue;
lock (_workSync)
{
queue = _closeHandleAdding;
_closeHandleAdding = _closeHandleRunning;
_closeHandleRunning = queue;
}
bool wasWork = queue.Count > 0;
while (queue.Count != 0)
{
var closeHandle = queue.Dequeue();
try
{
closeHandle.Callback(closeHandle.Handle);
}
catch (Exception ex)
{
_log.LogError(0, ex, $"{nameof(LibuvThread)}.{nameof(DoPostCloseHandle)}");
throw;
}
}
return wasWork;
}
private static async Task<bool> WaitAsync(Task task, TimeSpan timeout)
{
return await Task.WhenAny(task, Task.Delay(timeout)).ConfigureAwait(false) == task;
}
public void Schedule(Action action)
{
Post(state => state(), action);
}
private struct Work
{
public Action<object, object> CallbackAdapter;
public object Callback;
public object State;
public TaskCompletionSource<object> Completion;
}
private struct CloseHandle
{
public Action<IntPtr> Callback;
public IntPtr Handle;
}
private class CallbackAdapter<T>
{
public static readonly Action<object, object> PostCallbackAdapter = (callback, state) => ((Action<T>)callback).Invoke((T)state);
public static readonly Action<object, object> PostAsyncCallbackAdapter = (callback, state) => ((Action<T>)callback).Invoke((T)state);
}
}
}
| 1 | 13,107 | You could call OnStopRude() directly here to do the same thing. | aspnet-KestrelHttpServer | .cs |
@@ -643,6 +643,8 @@ func (a *WebAPI) ListUnregisteredApplications(ctx context.Context, _ *webservice
return &webservice.ListUnregisteredApplicationsResponse{}, nil
}
+ // TODO: Eliminate apps registered by another Piped
+ // In case that multiple Pipeds watches the same repo, it could be happen that one Piped report App-1 as an unregistered app even though another Piped register that app.
sort.Slice(allApps, func(i, j int) bool {
return allApps[i].Path < allApps[j].Path
}) | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcapi
import (
"bytes"
"context"
"encoding/gob"
"errors"
"fmt"
"sort"
"strings"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore"
"github.com/pipe-cd/pipe/pkg/app/api/commandstore"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/app/api/stagelogstore"
"github.com/pipe-cd/pipe/pkg/cache"
"github.com/pipe-cd/pipe/pkg/cache/memorycache"
"github.com/pipe-cd/pipe/pkg/cache/rediscache"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/datastore"
"github.com/pipe-cd/pipe/pkg/filestore"
"github.com/pipe-cd/pipe/pkg/insight/insightstore"
"github.com/pipe-cd/pipe/pkg/model"
"github.com/pipe-cd/pipe/pkg/redis"
"github.com/pipe-cd/pipe/pkg/rpc/rpcauth"
)
type encrypter interface {
Encrypt(text string) (string, error)
}
// WebAPI implements the behaviors for the gRPC definitions of WebAPI.
type WebAPI struct {
applicationStore datastore.ApplicationStore
environmentStore datastore.EnvironmentStore
deploymentStore datastore.DeploymentStore
pipedStore datastore.PipedStore
projectStore datastore.ProjectStore
apiKeyStore datastore.APIKeyStore
stageLogStore stagelogstore.Store
applicationLiveStateStore applicationlivestatestore.Store
commandStore commandstore.Store
insightStore insightstore.Store
encrypter encrypter
appProjectCache cache.Cache
deploymentProjectCache cache.Cache
pipedProjectCache cache.Cache
envProjectCache cache.Cache
insightCache cache.Cache
redis redis.Redis
projectsInConfig map[string]config.ControlPlaneProject
logger *zap.Logger
}
// NewWebAPI creates a new WebAPI instance.
func NewWebAPI(
ctx context.Context,
ds datastore.DataStore,
fs filestore.Store,
sls stagelogstore.Store,
alss applicationlivestatestore.Store,
cmds commandstore.Store,
is insightstore.Store,
rd redis.Redis,
projs map[string]config.ControlPlaneProject,
encrypter encrypter,
logger *zap.Logger) *WebAPI {
a := &WebAPI{
applicationStore: datastore.NewApplicationStore(ds),
environmentStore: datastore.NewEnvironmentStore(ds),
deploymentStore: datastore.NewDeploymentStore(ds),
pipedStore: datastore.NewPipedStore(ds),
projectStore: datastore.NewProjectStore(ds),
apiKeyStore: datastore.NewAPIKeyStore(ds),
stageLogStore: sls,
applicationLiveStateStore: alss,
commandStore: cmds,
insightStore: is,
projectsInConfig: projs,
encrypter: encrypter,
appProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
deploymentProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
pipedProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
envProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
insightCache: rediscache.NewTTLCache(rd, 3*time.Hour),
redis: rd,
logger: logger.Named("web-api"),
}
return a
}
// Register registers all handling of this service into the specified gRPC server.
func (a *WebAPI) Register(server *grpc.Server) {
webservice.RegisterWebServiceServer(server, a)
}
func (a *WebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
env := model.Environment{
Id: uuid.New().String(),
Name: req.Name,
Desc: req.Desc,
ProjectId: claims.Role.ProjectId,
}
err = a.environmentStore.AddEnvironment(ctx, &env)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The environment already exists")
}
if err != nil {
a.logger.Error("failed to create environment", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create environment")
}
return &webservice.AddEnvironmentResponse{}, nil
}
func (a *WebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *WebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
},
}
envs, err := a.environmentStore.ListEnvironments(ctx, opts)
if err != nil {
a.logger.Error("failed to get environments", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get environments")
}
return &webservice.ListEnvironmentsResponse{
Environments: envs,
}, nil
}
func (a *WebAPI) EnableEnvironment(ctx context.Context, req *webservice.EnableEnvironmentRequest) (*webservice.EnableEnvironmentResponse, error) {
if err := a.updateEnvironmentEnable(ctx, req.EnvironmentId, true); err != nil {
return nil, err
}
return &webservice.EnableEnvironmentResponse{}, nil
}
func (a *WebAPI) DisableEnvironment(ctx context.Context, req *webservice.DisableEnvironmentRequest) (*webservice.DisableEnvironmentResponse, error) {
if err := a.updateEnvironmentEnable(ctx, req.EnvironmentId, false); err != nil {
return nil, err
}
return &webservice.DisableEnvironmentResponse{}, nil
}
// DeleteEnvironment deletes the given environment and all applications that belong to it.
// It returns a FailedPrecondition error if any Piped is still using that environment.
func (a *WebAPI) DeleteEnvironment(ctx context.Context, req *webservice.DeleteEnvironmentRequest) (*webservice.DeleteEnvironmentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateEnvBelongsToProject(ctx, req.EnvironmentId, claims.Role.ProjectId); err != nil {
return nil, err
}
// Check if no Piped has permission to the given environment.
pipeds, err := a.pipedStore.ListPipeds(ctx, datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
{
Field: "EnvIds",
Operator: datastore.OperatorContains,
Value: req.EnvironmentId,
},
{
Field: "Disabled",
Operator: datastore.OperatorEqual,
Value: false,
},
},
})
if err != nil {
a.logger.Error("failed to fetch Pipeds linked to the given environment",
zap.String("env-id", req.EnvironmentId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to validate the deletion operation")
}
if len(pipeds) > 0 {
pipedNames := make([]string, 0, len(pipeds))
for _, p := range pipeds {
pipedNames = append(pipedNames, p.Name)
}
return nil, status.Errorf(
codes.FailedPrecondition,
"Found Pipeds linked the environment to be deleted. Please remove this environment from all Pipeds (%s) on the Piped settings page",
strings.Join(pipedNames, ","),
)
}
// Delete all applications that belongs to the given env.
apps, _, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
{
Field: "EnvId",
Operator: datastore.OperatorEqual,
Value: req.EnvironmentId,
},
},
})
if err != nil {
a.logger.Error("failed to fetch applications that belongs to the given environment",
zap.String("env-id", req.EnvironmentId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to fetch applications that belongs to the given environment")
}
for _, app := range apps {
if app.ProjectId != claims.Role.ProjectId {
continue
}
err := a.applicationStore.DeleteApplication(ctx, app.Id)
if err == nil {
continue
}
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.Internal, "The application is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "Invalid value to delete")
default:
a.logger.Error("failed to delete the application",
zap.String("application-id", app.Id),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to delete the application")
}
}
if err := a.environmentStore.DeleteEnvironment(ctx, req.EnvironmentId); err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.NotFound, "The environment is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "Invalid value to delete")
default:
a.logger.Error("failed to delete the environment",
zap.String("env-id", req.EnvironmentId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to delete the environment")
}
}
return &webservice.DeleteEnvironmentResponse{}, nil
}
func (a *WebAPI) updateEnvironmentEnable(ctx context.Context, envID string, enable bool) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validateEnvBelongsToProject(ctx, envID, claims.Role.ProjectId); err != nil {
return err
}
var updater func(context.Context, string) error
if enable {
updater = a.environmentStore.EnableEnvironment
} else {
updater = a.environmentStore.DisableEnvironment
}
if err := updater(ctx, envID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.NotFound, "The environment is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the environment",
zap.String("env-id", envID),
zap.Error(err),
)
return status.Error(codes.Internal, "Failed to update the environment")
}
}
return nil
}
// validateEnvBelongsToProject checks if the given piped belongs to the given project.
// It gives back error unless the env belongs to the project.
func (a *WebAPI) validateEnvBelongsToProject(ctx context.Context, envID, projectID string) error {
eid, err := a.envProjectCache.Get(envID)
if err == nil {
if projectID != eid {
return status.Error(codes.PermissionDenied, "Requested environment doesn't belong to the project you logged in")
}
return nil
}
env, err := getEnvironment(ctx, a.environmentStore, envID, a.logger)
if err != nil {
return err
}
a.envProjectCache.Put(envID, env.ProjectId)
if projectID != env.ProjectId {
return status.Error(codes.PermissionDenied, "Requested environment doesn't belong to the project you logged in")
}
return nil
}
func (a *WebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
key, keyHash, err := model.GeneratePipedKey()
if err != nil {
a.logger.Error("failed to generate piped key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate the piped key")
}
piped := model.Piped{
Id: uuid.New().String(),
Name: req.Name,
Desc: req.Desc,
ProjectId: claims.Role.ProjectId,
EnvIds: req.EnvIds,
Status: model.Piped_OFFLINE,
}
if err := piped.AddKey(keyHash, claims.Subject, time.Now()); err != nil {
return nil, status.Error(codes.FailedPrecondition, fmt.Sprintf("Failed to create key: %v", err))
}
err = a.pipedStore.AddPiped(ctx, &piped)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The piped already exists")
}
if err != nil {
a.logger.Error("failed to register piped", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to register piped")
}
return &webservice.RegisterPipedResponse{
Id: piped.Id,
Key: key,
}, nil
}
func (a *WebAPI) UpdatePiped(ctx context.Context, req *webservice.UpdatePipedRequest) (*webservice.UpdatePipedResponse, error) {
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.UpdatePiped(ctx, req.PipedId, func(p *model.Piped) error {
p.Name = req.Name
p.Desc = req.Desc
p.EnvIds = req.EnvIds
return nil
})
}
if err := a.updatePiped(ctx, req.PipedId, updater); err != nil {
return nil, err
}
return &webservice.UpdatePipedResponse{}, nil
}
func (a *WebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
key, keyHash, err := model.GeneratePipedKey()
if err != nil {
a.logger.Error("failed to generate piped key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate the piped key")
}
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.AddKey(ctx, pipedID, keyHash, claims.Subject, time.Now())
}
if err := a.updatePiped(ctx, req.Id, updater); err != nil {
return nil, err
}
return &webservice.RecreatePipedKeyResponse{
Key: key,
}, nil
}
func (a *WebAPI) DeleteOldPipedKeys(ctx context.Context, req *webservice.DeleteOldPipedKeysRequest) (*webservice.DeleteOldPipedKeysResponse, error) {
if _, err := rpcauth.ExtractClaims(ctx); err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.DeleteOldKeys(ctx, pipedID)
}
if err := a.updatePiped(ctx, req.PipedId, updater); err != nil {
return nil, err
}
return &webservice.DeleteOldPipedKeysResponse{}, nil
}
func (a *WebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) {
if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.EnablePiped); err != nil {
return nil, err
}
return &webservice.EnablePipedResponse{}, nil
}
func (a *WebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) {
if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.DisablePiped); err != nil {
return nil, err
}
return &webservice.DisablePipedResponse{}, nil
}
func (a *WebAPI) updatePiped(ctx context.Context, pipedID string, updater func(context.Context, string) error) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validatePipedBelongsToProject(ctx, pipedID, claims.Role.ProjectId); err != nil {
return err
}
if err := updater(ctx, pipedID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.InvalidArgument, "The piped is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the piped",
zap.String("piped-id", pipedID),
zap.Error(err),
)
// TODO: Improve error handling, instead of considering all as Internal error like this
// we should check the error type to decide to pass its message to the web client or just a generic message.
return status.Error(codes.Internal, "Failed to update the piped")
}
}
return nil
}
// TODO: Consider using piped-stats to decide piped connection status.
func (a *WebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: claims.Role.ProjectId,
},
},
}
if req.Options != nil {
if req.Options.Enabled != nil {
opts.Filters = append(opts.Filters, datastore.ListFilter{
Field: "Disabled",
Operator: datastore.OperatorEqual,
Value: !req.Options.Enabled.GetValue(),
})
}
}
pipeds, err := a.pipedStore.ListPipeds(ctx, opts)
if err != nil {
a.logger.Error("failed to get pipeds", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get pipeds")
}
// Redact all sensitive data inside piped message before sending to the client.
for i := range pipeds {
pipeds[i].RedactSensitiveData()
}
return &webservice.ListPipedsResponse{
Pipeds: pipeds,
}, nil
}
// GetPiped returns the requested piped after verifying that it belongs to the
// caller's project. Sensitive fields are redacted before the response is sent.
func (a *WebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
	if err != nil {
		return nil, err
	}
	// Ownership check happens after the fetch; it may be answered from cache.
	if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil {
		return nil, err
	}
	// Redact all sensitive data inside piped message before sending to the client.
	piped.RedactSensitiveData()
	return &webservice.GetPipedResponse{
		Piped: piped,
	}, nil
}
// UpdatePipedDesiredVersion sets the desired version on every piped listed in
// the request. The operation stops at the first piped that fails to update,
// so earlier pipeds may already have been updated when an error is returned.
func (a *WebAPI) UpdatePipedDesiredVersion(ctx context.Context, req *webservice.UpdatePipedDesiredVersionRequest) (*webservice.UpdatePipedDesiredVersionResponse, error) {
	updater := func(ctx context.Context, pipedID string) error {
		return a.pipedStore.UpdatePiped(ctx, pipedID, func(p *model.Piped) error {
			p.DesiredVersion = req.Version
			return nil
		})
	}
	for _, pipedID := range req.PipedIds {
		// updatePiped also authenticates the caller and checks project ownership.
		if err := a.updatePiped(ctx, pipedID, updater); err != nil {
			return nil, err
		}
	}
	return &webservice.UpdatePipedDesiredVersionResponse{}, nil
}
// validatePipedBelongsToProject checks if the given piped belongs to the given project.
// It gives back error unless the piped belongs to the project.
func (a *WebAPI) validatePipedBelongsToProject(ctx context.Context, pipedID, projectID string) error {
	mismatch := status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in")

	// Fast path: the piped's project ID is already cached.
	if cached, err := a.pipedProjectCache.Get(pipedID); err == nil {
		if cached != projectID {
			return mismatch
		}
		return nil
	}

	// Slow path: load the piped from the datastore and populate the cache.
	piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger)
	if err != nil {
		return err
	}
	a.pipedProjectCache.Put(pipedID, piped.ProjectId)

	if piped.ProjectId != projectID {
		return mismatch
	}
	return nil
}
// ListUnregisteredApplications returns applications that pipeds have reported
// but that have not yet been registered, aggregated across all pipeds of the
// caller's project and sorted by repository path.
func (a *WebAPI) ListUnregisteredApplications(ctx context.Context, _ *webservice.ListUnregisteredApplicationsRequest) (*webservice.ListUnregisteredApplicationsResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	// Collect all apps that belong to the project.
	key := makeUnregisteredAppsCacheKey(claims.Role.ProjectId)
	c := rediscache.NewHashCache(a.redis, key)
	// pipedToApps assumes to be a map["piped-id"][]byte(slice of *model.ApplicationInfo encoded by encoding/gob)
	pipedToApps, err := c.GetAll()
	if errors.Is(err, cache.ErrNotFound) {
		// Nothing cached means there is nothing unregistered to report.
		return &webservice.ListUnregisteredApplicationsResponse{}, nil
	}
	if err != nil {
		a.logger.Error("failed to get unregistered apps", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to get unregistered apps")
	}
	// Integrate all apps cached for each Piped.
	allApps := make([]*model.ApplicationInfo, 0)
	for _, as := range pipedToApps {
		b, ok := as.([]byte)
		if !ok {
			return nil, status.Error(codes.Internal, "Unexpected data cached")
		}
		dec := gob.NewDecoder(bytes.NewReader(b))
		var apps []*model.ApplicationInfo
		if err := dec.Decode(&apps); err != nil {
			a.logger.Error("failed to decode the unregistered apps", zap.Error(err))
			return nil, status.Error(codes.Internal, "failed to decode the unregistered apps")
		}
		allApps = append(allApps, apps...)
	}
	if len(allApps) == 0 {
		return &webservice.ListUnregisteredApplicationsResponse{}, nil
	}
	// Sort for a deterministic response; map iteration order is random.
	sort.Slice(allApps, func(i, j int) bool {
		return allApps[i].Path < allApps[j].Path
	})
	return &webservice.ListUnregisteredApplicationsResponse{
		Applications: allApps,
	}, nil
}
// TODO: Validate the specified piped to ensure that it belongs to the specified environment.
// AddApplication registers a new application under the caller's project, bound
// to the given piped, environment and git path.
func (a *WebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
	if err != nil {
		return nil, err
	}
	// The piped must belong to the same project as the caller.
	if piped.ProjectId != claims.Role.ProjectId {
		return nil, status.Error(codes.InvalidArgument, "Requested piped does not belong to your project")
	}
	// Resolve repo/path/config-filename against the piped's repository settings.
	gitpath, err := makeGitPath(
		req.GitPath.Repo.Id,
		req.GitPath.Path,
		req.GitPath.ConfigFilename,
		piped,
		a.logger,
	)
	if err != nil {
		return nil, err
	}
	app := model.Application{
		Id:            uuid.New().String(),
		Name:          req.Name,
		EnvId:         req.EnvId,
		PipedId:       req.PipedId,
		ProjectId:     claims.Role.ProjectId,
		GitPath:       gitpath,
		Kind:          req.Kind,
		CloudProvider: req.CloudProvider,
		Description:   req.Description,
	}
	err = a.applicationStore.AddApplication(ctx, &app)
	if errors.Is(err, datastore.ErrAlreadyExists) {
		return nil, status.Error(codes.AlreadyExists, "The application already exists")
	}
	if err != nil {
		a.logger.Error("failed to create application", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to create application")
	}
	return &webservice.AddApplicationResponse{
		ApplicationId: app.Id,
	}, nil
}
// UpdateApplication updates the mutable settings (name, env, piped, kind,
// cloud provider) of the specified application. Description is updated
// separately via UpdateApplicationDescription.
func (a *WebAPI) UpdateApplication(ctx context.Context, req *webservice.UpdateApplicationRequest) (*webservice.UpdateApplicationResponse, error) {
	updater := func(app *model.Application) error {
		app.Name = req.Name
		app.EnvId = req.EnvId
		app.PipedId = req.PipedId
		app.Kind = req.Kind
		app.CloudProvider = req.CloudProvider
		return nil
	}
	// updateApplication authenticates the caller and verifies the new piped
	// belongs to the caller's project before applying the updater.
	if err := a.updateApplication(ctx, req.ApplicationId, req.PipedId, updater); err != nil {
		return nil, err
	}
	return &webservice.UpdateApplicationResponse{}, nil
}
// UpdateApplicationDescription replaces the description of the specified application.
func (a *WebAPI) UpdateApplicationDescription(ctx context.Context, req *webservice.UpdateApplicationDescriptionRequest) (*webservice.UpdateApplicationDescriptionResponse, error) {
	// No piped re-assignment is involved here, hence the empty piped ID.
	err := a.updateApplication(ctx, req.ApplicationId, "", func(app *model.Application) error {
		app.Description = req.Description
		return nil
	})
	if err != nil {
		return nil, err
	}
	return &webservice.UpdateApplicationDescriptionResponse{}, nil
}
// updateApplication authenticates the caller, optionally checks that the given
// piped belongs to the caller's project, and then applies updater to the
// application identified by id. Pass an empty pipedID to skip the piped check.
func (a *WebAPI) updateApplication(ctx context.Context, id, pipedID string, updater func(app *model.Application) error) error {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return err
	}
	// Ensure that the specified piped is assignable for this application.
	if pipedID != "" {
		piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger)
		if err != nil {
			return err
		}
		if piped.ProjectId != claims.Role.ProjectId {
			return status.Error(codes.InvalidArgument, "Requested piped does not belong to your project")
		}
	}
	err = a.applicationStore.UpdateApplication(ctx, id, updater)
	if err != nil {
		a.logger.Error("failed to update application", zap.Error(err))
		return status.Error(codes.Internal, "Failed to update application")
	}
	return nil
}
// EnableApplication marks the specified application as enabled.
func (a *WebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) {
	err := a.updateApplicationEnable(ctx, req.ApplicationId, true)
	if err != nil {
		return nil, err
	}
	return &webservice.EnableApplicationResponse{}, nil
}
// DisableApplication marks the specified application as disabled.
func (a *WebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) {
	err := a.updateApplicationEnable(ctx, req.ApplicationId, false)
	if err != nil {
		return nil, err
	}
	return &webservice.DisableApplicationResponse{}, nil
}
// DeleteApplication removes the specified application after verifying that it
// belongs to the caller's project.
func (a *WebAPI) DeleteApplication(ctx context.Context, req *webservice.DeleteApplicationRequest) (*webservice.DeleteApplicationResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
		return nil, err
	}
	// Map well-known datastore errors to precise gRPC status codes.
	if err := a.applicationStore.DeleteApplication(ctx, req.ApplicationId); err != nil {
		switch err {
		case datastore.ErrNotFound:
			return nil, status.Error(codes.NotFound, "The application is not found")
		case datastore.ErrInvalidArgument:
			return nil, status.Error(codes.InvalidArgument, "Invalid value to delete")
		default:
			a.logger.Error("failed to delete the application",
				zap.String("application-id", req.ApplicationId),
				zap.Error(err),
			)
			return nil, status.Error(codes.Internal, "Failed to delete the application")
		}
	}
	return &webservice.DeleteApplicationResponse{}, nil
}
// updateApplicationEnable toggles the enabled state of the given application,
// after authenticating the caller and verifying project ownership.
func (a *WebAPI) updateApplicationEnable(ctx context.Context, appID string, enable bool) error {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return err
	}
	if err := a.validateAppBelongsToProject(ctx, appID, claims.Role.ProjectId); err != nil {
		return err
	}

	// Pick the store operation matching the requested state.
	updater := a.applicationStore.DisableApplication
	if enable {
		updater = a.applicationStore.EnableApplication
	}

	if err := updater(ctx, appID); err != nil {
		switch err {
		case datastore.ErrNotFound:
			return status.Error(codes.NotFound, "The application is not found")
		case datastore.ErrInvalidArgument:
			return status.Error(codes.InvalidArgument, "Invalid value for update")
		default:
			a.logger.Error("failed to update the application",
				zap.String("application-id", appID),
				zap.Error(err),
			)
			return status.Error(codes.Internal, "Failed to update the application")
		}
	}
	return nil
}
// ListApplications returns the applications of the caller's project, newest
// first, applying the optional filters from the request. Filtering by labels
// is performed in-process after the datastore query.
func (a *WebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	orders := []datastore.Order{
		{
			Field:     "UpdatedAt",
			Direction: datastore.Desc,
		},
		{
			Field:     "Id",
			Direction: datastore.Asc,
		},
	}
	filters := []datastore.ListFilter{
		{
			Field:    "ProjectId",
			Operator: datastore.OperatorEqual,
			Value:    claims.Role.ProjectId,
		},
	}
	if o := req.Options; o != nil {
		if o.Enabled != nil {
			// The datastore persists the inverted flag ("Disabled").
			filters = append(filters, datastore.ListFilter{
				Field:    "Disabled",
				Operator: datastore.OperatorEqual,
				Value:    !o.Enabled.GetValue(),
			})
		}
		// Allowing multiple so that it can do In Query later.
		// Currently only the first value is used.
		if len(o.Kinds) > 0 {
			filters = append(filters, datastore.ListFilter{
				Field:    "Kind",
				Operator: datastore.OperatorEqual,
				Value:    o.Kinds[0],
			})
		}
		if len(o.SyncStatuses) > 0 {
			filters = append(filters, datastore.ListFilter{
				Field:    "SyncState.Status",
				Operator: datastore.OperatorEqual,
				Value:    o.SyncStatuses[0],
			})
		}
		if len(o.EnvIds) > 0 {
			filters = append(filters, datastore.ListFilter{
				Field:    "EnvId",
				Operator: datastore.OperatorEqual,
				Value:    o.EnvIds[0],
			})
		}
		if o.Name != "" {
			filters = append(filters, datastore.ListFilter{
				Field:    "Name",
				Operator: datastore.OperatorEqual,
				Value:    o.Name,
			})
		}
	}
	apps, _, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{
		Filters: filters,
		Orders:  orders,
	})
	if err != nil {
		a.logger.Error("failed to get applications", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to get applications")
	}
	// Guard against a nil Options: the code above treats req.Options as
	// optional, so dereferencing it unconditionally here would panic.
	if req.Options == nil || len(req.Options.Labels) == 0 {
		return &webservice.ListApplicationsResponse{
			Applications: apps,
		}, nil
	}
	// NOTE: Filtering by labels is done by the application-side because we need to create composite indexes for every combination in the filter.
	filtered := make([]*model.Application, 0, len(apps))
	for _, app := range apps {
		if app.ContainLabels(req.Options.Labels) {
			filtered = append(filtered, app)
		}
	}
	return &webservice.ListApplicationsResponse{
		Applications: filtered,
	}, nil
}
// SyncApplication enqueues a SYNC_APPLICATION command for the piped that owns
// the specified application, after verifying project ownership. It returns
// the ID of the created command so the client can poll its progress.
func (a *WebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
	if err != nil {
		return nil, err
	}
	if claims.Role.ProjectId != app.ProjectId {
		return nil, status.Error(codes.InvalidArgument, "Requested application does not belong to your project")
	}
	cmd := model.Command{
		Id:            uuid.New().String(),
		PipedId:       app.PipedId,
		ApplicationId: app.Id,
		ProjectId:     app.ProjectId,
		Type:          model.Command_SYNC_APPLICATION,
		Commander:     claims.Subject,
		SyncApplication: &model.Command_SyncApplication{
			ApplicationId: app.Id,
			SyncStrategy:  req.SyncStrategy,
		},
	}
	if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
		return nil, err
	}
	return &webservice.SyncApplicationResponse{
		CommandId: cmd.Id,
	}, nil
}
// GetApplication returns the specified application after verifying that it
// belongs to the caller's project.
func (a *WebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
	if err != nil {
		return nil, err
	}
	if app.ProjectId != claims.Role.ProjectId {
		return nil, status.Error(codes.InvalidArgument, "Requested application does not belong to your project")
	}
	return &webservice.GetApplicationResponse{
		Application: app,
	}, nil
}
// GenerateApplicationSealedSecret encrypts the given data with the public key
// configured on the specified piped, so that only that piped can decrypt it.
// The piped must belong to the caller's project.
func (a *WebAPI) GenerateApplicationSealedSecret(ctx context.Context, req *webservice.GenerateApplicationSealedSecretRequest) (*webservice.GenerateApplicationSealedSecretResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
	if err != nil {
		return nil, err
	}
	if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil {
		return nil, err
	}
	se := model.GetSecretEncryptionInPiped(piped)
	// getEncriptionKey resolves the public key from the piped's secret
	// encryption settings (helper defined elsewhere in this package).
	pubkey, err := getEncriptionKey(se)
	if err != nil {
		return nil, err
	}
	// req.Base64Encoding indicates whether the plaintext should be base64
	// encoded before encryption.
	ciphertext, err := encrypt(req.Data, pubkey, req.Base64Encoding, a.logger)
	if err != nil {
		return nil, err
	}
	return &webservice.GenerateApplicationSealedSecretResponse{
		Data: ciphertext,
	}, nil
}
// validateAppBelongsToProject checks if the given application belongs to the given project.
// It gives back error unless the application belongs to the project.
func (a *WebAPI) validateAppBelongsToProject(ctx context.Context, appID, projectID string) error {
	mismatch := status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in")

	// Fast path: the application's project ID is already cached.
	if cached, err := a.appProjectCache.Get(appID); err == nil {
		if cached != projectID {
			return mismatch
		}
		return nil
	}

	// Slow path: load the application from the datastore and populate the cache.
	app, err := getApplication(ctx, a.applicationStore, appID, a.logger)
	if err != nil {
		return err
	}
	a.appProjectCache.Put(appID, app.ProjectId)

	if app.ProjectId != projectID {
		return mismatch
	}
	return nil
}
// ListDeployments returns a page of deployments of the caller's project,
// newest first, applying the optional filters from the request. Filtering by
// labels is performed in-process; when labels are given, the datastore is
// queried repeatedly until the page is filled or the scan reaches
// req.PageMinUpdatedAt.
func (a *WebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	orders := []datastore.Order{
		{
			Field:     "UpdatedAt",
			Direction: datastore.Desc,
		},
		{
			Field:     "Id",
			Direction: datastore.Asc,
		},
	}
	filters := []datastore.ListFilter{
		{
			Field:    "ProjectId",
			Operator: datastore.OperatorEqual,
			Value:    claims.Role.ProjectId,
		},
		{
			Field:    "UpdatedAt",
			Operator: datastore.OperatorGreaterThanOrEqual,
			Value:    req.PageMinUpdatedAt,
		},
	}
	if o := req.Options; o != nil {
		// Allowing multiple so that it can do In Query later.
		// Currently only the first value is used.
		if len(o.Statuses) > 0 {
			filters = append(filters, datastore.ListFilter{
				Field:    "Status",
				Operator: datastore.OperatorEqual,
				Value:    o.Statuses[0],
			})
		}
		if len(o.Kinds) > 0 {
			filters = append(filters, datastore.ListFilter{
				Field:    "Kind",
				Operator: datastore.OperatorEqual,
				Value:    o.Kinds[0],
			})
		}
		if len(o.ApplicationIds) > 0 {
			filters = append(filters, datastore.ListFilter{
				Field:    "ApplicationId",
				Operator: datastore.OperatorEqual,
				Value:    o.ApplicationIds[0],
			})
		}
		if len(o.EnvIds) > 0 {
			filters = append(filters, datastore.ListFilter{
				Field:    "EnvId",
				Operator: datastore.OperatorEqual,
				Value:    o.EnvIds[0],
			})
		}
		if o.ApplicationName != "" {
			filters = append(filters, datastore.ListFilter{
				Field:    "ApplicationName",
				Operator: datastore.OperatorEqual,
				Value:    o.ApplicationName,
			})
		}
	}
	pageSize := int(req.PageSize)
	options := datastore.ListOptions{
		Filters: filters,
		Orders:  orders,
		Limit:   pageSize,
		Cursor:  req.Cursor,
	}
	deployments, cursor, err := a.deploymentStore.ListDeployments(ctx, options)
	if err != nil {
		a.logger.Error("failed to get deployments", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to get deployments")
	}
	// Guard against a nil Options: the code above treats req.Options as
	// optional, so dereferencing it unconditionally here would panic.
	if req.Options == nil || len(req.Options.Labels) == 0 || len(deployments) == 0 {
		return &webservice.ListDeploymentsResponse{
			Deployments: deployments,
			Cursor:      cursor,
		}, nil
	}
	labels := req.Options.Labels
	// Start filtering them by labels.
	//
	// NOTE: Filtering by labels is done by the application-side because we need to create composite indexes for every combination in the filter.
	// We don't want to depend on any other search engine, that's why it filters here.
	filtered := make([]*model.Deployment, 0, len(deployments))
	for _, d := range deployments {
		if d.ContainLabels(labels) {
			filtered = append(filtered, d)
		}
	}
	// Stop running additional queries for more data, and return filtered deployments immediately with
	// current cursor if the size before filtering is already less than the page size.
	if len(deployments) < pageSize {
		return &webservice.ListDeploymentsResponse{
			Deployments: filtered,
			Cursor:      cursor,
		}, nil
	}
	// Repeat the query until the number of filtered deployments reaches the page size,
	// or until it finishes scanning to page_min_updated_at.
	for len(filtered) < pageSize {
		options.Cursor = cursor
		deployments, cursor, err = a.deploymentStore.ListDeployments(ctx, options)
		if err != nil {
			a.logger.Error("failed to get deployments", zap.Error(err))
			return nil, status.Error(codes.Internal, "Failed to get deployments")
		}
		if len(deployments) == 0 {
			break
		}
		for _, d := range deployments {
			if d.ContainLabels(labels) {
				filtered = append(filtered, d)
			}
		}
		// We've already specified UpdatedAt >= req.PageMinUpdatedAt, so we need to check just equality.
		if deployments[len(deployments)-1].UpdatedAt == req.PageMinUpdatedAt {
			break
		}
	}
	// TODO: Think about possibility that the response of ListDeployments exceeds the page size
	return &webservice.ListDeploymentsResponse{
		Deployments: filtered,
		Cursor:      cursor,
	}, nil
}
// GetDeployment returns the specified deployment after verifying that it
// belongs to the caller's project.
func (a *WebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
	if err != nil {
		return nil, err
	}
	if claims.Role.ProjectId != deployment.ProjectId {
		return nil, status.Error(codes.InvalidArgument, "Requested deployment does not belong to your project")
	}
	return &webservice.GetDeploymentResponse{
		Deployment: deployment,
	}, nil
}
// validateDeploymentBelongsToProject checks if the given deployment belongs to the given project.
// It gives back error unless the deployment belongs to the project.
func (a *WebAPI) validateDeploymentBelongsToProject(ctx context.Context, deploymentID, projectID string) error {
	mismatch := status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in")

	// Fast path: the deployment's project ID is already cached.
	if cached, err := a.deploymentProjectCache.Get(deploymentID); err == nil {
		if cached != projectID {
			return mismatch
		}
		return nil
	}

	// Slow path: load the deployment from the datastore and populate the cache.
	deployment, err := getDeployment(ctx, a.deploymentStore, deploymentID, a.logger)
	if err != nil {
		return err
	}
	a.deploymentProjectCache.Put(deploymentID, deployment.ProjectId)

	if deployment.ProjectId != projectID {
		return mismatch
	}
	return nil
}
// GetStageLog returns the log blocks of one stage of a deployment, starting at
// the requested offset, together with a flag telling whether the log is
// complete. The deployment must belong to the caller's project.
func (a *WebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
		return nil, err
	}
	// RetriedCount selects which retry attempt of the stage to read logs for.
	blocks, completed, err := a.stageLogStore.FetchLogs(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.OffsetIndex)
	if errors.Is(err, stagelogstore.ErrNotFound) {
		return nil, status.Error(codes.NotFound, "The stage log not found")
	}
	if err != nil {
		a.logger.Error("failed to get stage logs", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to get stage logs")
	}
	return &webservice.GetStageLogResponse{
		Blocks:    blocks,
		Completed: completed,
	}, nil
}
// CancelDeployment enqueues a CANCEL_DEPLOYMENT command for the piped running
// the specified deployment. The deployment must belong to the caller's project
// and must not be completed yet.
func (a *WebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
	if err != nil {
		return nil, err
	}
	if claims.Role.ProjectId != deployment.ProjectId {
		return nil, status.Error(codes.InvalidArgument, "Requested deployment does not belong to your project")
	}
	// A completed deployment can no longer be cancelled.
	if model.IsCompletedDeployment(deployment.Status) {
		return nil, status.Errorf(codes.FailedPrecondition, "could not cancel the deployment because it was already completed")
	}
	cmd := model.Command{
		Id:            uuid.New().String(),
		PipedId:       deployment.PipedId,
		ApplicationId: deployment.ApplicationId,
		ProjectId:     deployment.ProjectId,
		DeploymentId:  req.DeploymentId,
		Type:          model.Command_CANCEL_DEPLOYMENT,
		Commander:     claims.Subject,
		CancelDeployment: &model.Command_CancelDeployment{
			DeploymentId:    req.DeploymentId,
			ForceRollback:   req.ForceRollback,
			ForceNoRollback: req.ForceNoRollback,
		},
	}
	if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
		return nil, err
	}
	return &webservice.CancelDeploymentResponse{
		CommandId: cmd.Id,
	}, nil
}
// ApproveStage approves the specified WAIT_APPROVAL stage of a deployment by
// enqueuing an APPROVE_STAGE command for the owning piped. The caller must
// belong to the deployment's project and, when the stage declares an approver
// list, must be listed in it.
func (a *WebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
	if err != nil {
		return nil, err
	}
	// Check project ownership before everything else so that a cross-project
	// caller learns nothing about the deployment (e.g. its approver list).
	if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
		return nil, err
	}
	if err := validateApprover(deployment.Stages, claims.Subject, req.StageId); err != nil {
		return nil, err
	}
	stage, ok := deployment.StageStatusMap()[req.StageId]
	if !ok {
		return nil, status.Error(codes.FailedPrecondition, "The stage was not found in the deployment")
	}
	// A completed stage can no longer be approved.
	if model.IsCompletedStage(stage) {
		return nil, status.Errorf(codes.FailedPrecondition, "Could not approve the stage because it was already completed")
	}
	commandID := uuid.New().String()
	cmd := model.Command{
		Id:            commandID,
		PipedId:       deployment.PipedId,
		ApplicationId: deployment.ApplicationId,
		ProjectId:     deployment.ProjectId,
		DeploymentId:  req.DeploymentId,
		StageId:       req.StageId,
		Type:          model.Command_APPROVE_STAGE,
		Commander:     claims.Subject,
		ApproveStage: &model.Command_ApproveStage{
			DeploymentId: req.DeploymentId,
			StageId:      req.StageId,
		},
	}
	if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
		return nil, err
	}
	return &webservice.ApproveStageResponse{
		CommandId: commandID,
	}, nil
}
// validateApprover reports whether the given commander may approve the stage
// identified by stageID. The stage's "Approvers" metadata entry holds a
// comma-separated list of allowed subjects; an absent or empty entry means
// anyone may approve. No error means that the given commander is valid.
func validateApprover(stages []*model.PipelineStage, commander, stageID string) error {
	var approvers []string
	for _, s := range stages {
		if s.Id != stageID {
			continue
		}
		if as := s.Metadata["Approvers"]; as != "" {
			approvers = strings.Split(as, ",")
		}
		break
	}
	if len(approvers) == 0 {
		// Anyone can approve the deployment pipeline
		return nil
	}
	for _, ap := range approvers {
		if ap == commander {
			return nil
		}
	}
	// status.Errorf is the idiomatic form of status.Error(code, fmt.Sprintf(...)).
	return status.Errorf(codes.PermissionDenied, "You can't approve this deployment because you (%s) are not in the approver list: %v", commander, approvers)
}
// GetApplicationLiveState returns the latest live-state snapshot of the
// specified application, which must belong to the caller's project.
func (a *WebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
		return nil, err
	}
	snapshot, err := a.applicationLiveStateStore.GetStateSnapshot(ctx, req.ApplicationId)
	if errors.Is(err, filestore.ErrNotFound) {
		// No snapshot has been reported yet for this application.
		return nil, status.Error(codes.NotFound, "Application live state not found")
	}
	if err != nil {
		a.logger.Error("failed to get application live state", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to get application live state")
	}
	return &webservice.GetApplicationLiveStateResponse{
		Snapshot: snapshot,
	}, nil
}
// GetProject gets the specified project without sensitive data.
func (a *WebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	// The project is always the one the caller is logged in to.
	project, err := a.getProject(ctx, claims.Role.ProjectId)
	if err != nil {
		return nil, err
	}
	// Redact all sensitive data inside project message before sending to the client.
	project.RedactSensitiveData()
	return &webservice.GetProjectResponse{
		Project: project,
	}, nil
}
// getProject fetches the project identified by projectID, preferring the
// debug projects declared in the control-plane configuration over the
// datastore.
func (a *WebAPI) getProject(ctx context.Context, projectID string) (*model.Project, error) {
	// Projects configured statically in the control-plane config are served
	// directly without touching the datastore.
	if p, ok := a.projectsInConfig[projectID]; ok {
		return &model.Project{
			Id:   p.Id,
			Desc: p.Desc,
			StaticAdmin: &model.ProjectStaticUser{
				Username:     p.StaticAdmin.Username,
				PasswordHash: p.StaticAdmin.PasswordHash,
			},
		}, nil
	}
	project, err := a.projectStore.GetProject(ctx, projectID)
	if errors.Is(err, datastore.ErrNotFound) {
		return nil, status.Error(codes.NotFound, "The project is not found")
	}
	if err != nil {
		a.logger.Error("failed to get project", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to get project")
	}
	return project, nil
}
// UpdateProjectStaticAdmin updates the static admin user settings.
// Projects declared statically in the control-plane configuration cannot be
// modified through this API.
func (a *WebAPI) UpdateProjectStaticAdmin(ctx context.Context, req *webservice.UpdateProjectStaticAdminRequest) (*webservice.UpdateProjectStaticAdminResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
		return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
	}
	if err := a.projectStore.UpdateProjectStaticAdmin(ctx, claims.Role.ProjectId, req.Username, req.Password); err != nil {
		a.logger.Error("failed to update static admin", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to update static admin")
	}
	return &webservice.UpdateProjectStaticAdminResponse{}, nil
}
// EnableStaticAdmin enables static admin login.
// Projects declared statically in the control-plane configuration cannot be
// modified through this API.
func (a *WebAPI) EnableStaticAdmin(ctx context.Context, req *webservice.EnableStaticAdminRequest) (*webservice.EnableStaticAdminResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
		return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
	}
	if err := a.projectStore.EnableStaticAdmin(ctx, claims.Role.ProjectId); err != nil {
		a.logger.Error("failed to enable static admin login", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to enable static admin login")
	}
	return &webservice.EnableStaticAdminResponse{}, nil
}
// DisableStaticAdmin disables static admin login.
// Projects declared statically in the control-plane configuration cannot be
// modified through this API.
func (a *WebAPI) DisableStaticAdmin(ctx context.Context, req *webservice.DisableStaticAdminRequest) (*webservice.DisableStaticAdminResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
		return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
	}
	if err := a.projectStore.DisableStaticAdmin(ctx, claims.Role.ProjectId); err != nil {
		// Fixed typo: "disenable" -> "disable" in both the log and the
		// client-facing error message.
		a.logger.Error("failed to disable static admin login", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to disable static admin login")
	}
	return &webservice.DisableStaticAdminResponse{}, nil
}
// UpdateProjectSSOConfig updates the sso settings.
// Sensitive fields are encrypted before being persisted. Projects declared
// statically in the control-plane configuration cannot be modified.
func (a *WebAPI) UpdateProjectSSOConfig(ctx context.Context, req *webservice.UpdateProjectSSOConfigRequest) (*webservice.UpdateProjectSSOConfigResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
		return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
	}
	// Encrypt client secrets etc. in-place before storing.
	if err := req.Sso.Encrypt(a.encrypter); err != nil {
		a.logger.Error("failed to encrypt sensitive data in sso configurations", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to encrypt sensitive data in sso configurations")
	}
	if err := a.projectStore.UpdateProjectSSOConfig(ctx, claims.Role.ProjectId, req.Sso); err != nil {
		a.logger.Error("failed to update project single sign on settings", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to update project single sign on settings")
	}
	return &webservice.UpdateProjectSSOConfigResponse{}, nil
}
// UpdateProjectRBACConfig updates the RBAC settings.
// Projects declared statically in the control-plane configuration cannot be
// modified through this API.
func (a *WebAPI) UpdateProjectRBACConfig(ctx context.Context, req *webservice.UpdateProjectRBACConfigRequest) (*webservice.UpdateProjectRBACConfigResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
		return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
	}
	if err := a.projectStore.UpdateProjectRBACConfig(ctx, claims.Role.ProjectId, req.Rbac); err != nil {
		// Fixed copy-paste from the SSO handler: the messages wrongly said
		// "single sign on settings" although this updates RBAC settings.
		a.logger.Error("failed to update project RBAC settings", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to update project RBAC settings")
	}
	return &webservice.UpdateProjectRBACConfigResponse{}, nil
}
// GetMe gets information about the current user.
func (a *WebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	// Everything the client needs comes straight from the verified token claims.
	resp := &webservice.GetMeResponse{
		Subject:     claims.Subject,
		AvatarUrl:   claims.AvatarURL,
		ProjectId:   claims.Role.ProjectId,
		ProjectRole: claims.Role.ProjectRole,
	}
	return resp, nil
}
// GetCommand returns the specified command after verifying that it belongs to
// the caller's project. Clients use this to poll command handling status.
func (a *WebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	cmd, err := getCommand(ctx, a.commandStore, req.CommandId, a.logger)
	if err != nil {
		return nil, err
	}
	if claims.Role.ProjectId != cmd.ProjectId {
		return nil, status.Error(codes.InvalidArgument, "Requested command does not belong to your project")
	}
	return &webservice.GetCommandResponse{
		Command: cmd,
	}, nil
}
// GenerateAPIKey creates a new API key under the caller's project and returns
// the plain-text key; only its hash is persisted.
func (a *WebAPI) GenerateAPIKey(ctx context.Context, req *webservice.GenerateAPIKeyRequest) (*webservice.GenerateAPIKeyResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}

	id := uuid.New().String()
	key, hash, err := model.GenerateAPIKey(id)
	if err != nil {
		a.logger.Error("failed to generate API key", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to generate API key")
	}

	apiKey := &model.APIKey{
		Id:        id,
		Name:      req.Name,
		KeyHash:   hash,
		ProjectId: claims.Role.ProjectId,
		Role:      req.Role,
		Creator:   claims.Subject,
	}
	if err := a.apiKeyStore.AddAPIKey(ctx, apiKey); err != nil {
		if errors.Is(err, datastore.ErrAlreadyExists) {
			return nil, status.Error(codes.AlreadyExists, "The API key already exists")
		}
		a.logger.Error("failed to create API key", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to create API key")
	}

	return &webservice.GenerateAPIKeyResponse{Key: key}, nil
}
// DisableAPIKey disables the given API key if it belongs to the caller's project.
func (a *WebAPI) DisableAPIKey(ctx context.Context, req *webservice.DisableAPIKeyRequest) (*webservice.DisableAPIKeyResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}
	if err := a.apiKeyStore.DisableAPIKey(ctx, req.Id, claims.Role.ProjectId); err != nil {
		// Match with errors.Is (as GenerateAPIKey does) so wrapped datastore
		// errors are still recognized; the previous direct `switch err`
		// comparison would miss them.
		switch {
		case errors.Is(err, datastore.ErrNotFound):
			return nil, status.Error(codes.InvalidArgument, "The API key is not found")
		case errors.Is(err, datastore.ErrInvalidArgument):
			return nil, status.Error(codes.InvalidArgument, "Invalid value for update")
		default:
			a.logger.Error("failed to disable the API key",
				zap.String("apikey-id", req.Id),
				zap.Error(err),
			)
			return nil, status.Error(codes.Internal, "Failed to disable the API key")
		}
	}
	return &webservice.DisableAPIKeyResponse{}, nil
}
// ListAPIKeys lists the API keys of the caller's project, optionally filtered
// by enabled state. Key hashes are redacted before the response is returned.
func (a *WebAPI) ListAPIKeys(ctx context.Context, req *webservice.ListAPIKeysRequest) (*webservice.ListAPIKeysResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}

	filters := []datastore.ListFilter{
		{
			Field:    "ProjectId",
			Operator: datastore.OperatorEqual,
			Value:    claims.Role.ProjectId,
		},
	}
	// The datastore stores a "Disabled" flag, so the requested "Enabled"
	// option is inverted when building the filter.
	if req.Options != nil && req.Options.Enabled != nil {
		filters = append(filters, datastore.ListFilter{
			Field:    "Disabled",
			Operator: datastore.OperatorEqual,
			Value:    !req.Options.Enabled.GetValue(),
		})
	}

	apiKeys, err := a.apiKeyStore.ListAPIKeys(ctx, datastore.ListOptions{Filters: filters})
	if err != nil {
		a.logger.Error("failed to list API keys", zap.Error(err))
		return nil, status.Error(codes.Internal, "Failed to list API keys")
	}

	// Redact all sensitive data inside API key before sending to the client.
	for i := range apiKeys {
		apiKeys[i].RedactSensitiveData()
	}

	return &webservice.ListAPIKeysResponse{Keys: apiKeys}, nil
}
// GetInsightData returns the accumulated insight data.
//
// Chunks are read from the cache first; on a cache miss/failure they are
// loaded from the persistent insight store and written back to the cache on
// a best-effort basis.
func (a *WebAPI) GetInsightData(ctx context.Context, req *webservice.GetInsightDataRequest) (*webservice.GetInsightDataResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}

	count := int(req.DataPointCount)
	from := time.Unix(req.RangeFrom, 0)

	chunks, err := insightstore.LoadChunksFromCache(a.insightCache, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count)
	if err != nil {
		a.logger.Error("failed to load chunks from cache", zap.Error(err))
		// Fall back to the insight store when the cache cannot serve the request.
		chunks, err = a.insightStore.LoadChunks(ctx, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count)
		if err != nil {
			a.logger.Error("failed to load chunks from insightstore", zap.Error(err))
			return nil, err
		}
		// Best-effort cache refill; a failure here is only logged.
		if err := insightstore.PutChunksToCache(a.insightCache, chunks); err != nil {
			a.logger.Error("failed to put chunks to cache", zap.Error(err))
		}
	}

	idp, err := chunks.ExtractDataPoints(req.Step, from, count)
	if err != nil {
		// NOTE(review): extraction errors are logged but not returned, so idp
		// may be nil/partial in the response — confirm this degradation is intended.
		a.logger.Error("failed to extract data points from chunks", zap.Error(err))
	}

	// UpdatedAt is the latest accumulation timestamp across all chunks.
	var updateAt int64
	for _, c := range chunks {
		accumulatedTo := c.GetAccumulatedTo()
		if accumulatedTo > updateAt {
			updateAt = accumulatedTo
		}
	}

	return &webservice.GetInsightDataResponse{
		UpdatedAt:  updateAt,
		DataPoints: idp,
		Type:       model.InsightResultType_MATRIX,
		Matrix: []*model.InsightSampleStream{
			{
				DataPoints: idp,
			},
		},
	}, nil
}
// GetInsightApplicationCount returns the application counts of the caller's
// project along with the time they were last updated.
func (a *WebAPI) GetInsightApplicationCount(ctx context.Context, req *webservice.GetInsightApplicationCountRequest) (*webservice.GetInsightApplicationCountResponse, error) {
	claims, err := rpcauth.ExtractClaims(ctx)
	if err != nil {
		a.logger.Error("failed to authenticate the current user", zap.Error(err))
		return nil, err
	}

	// TODO: Cache application counts in the cache service.
	c, err := a.insightStore.LoadApplicationCounts(ctx, claims.Role.ProjectId)
	if err != nil {
		if err == filestore.ErrNotFound {
			return nil, status.Error(codes.NotFound, "Not found")
		}
		a.logger.Error("failed to load application counts", zap.Error(err))
		return nil, status.Error(codes.Internal, "failed to load application counts")
	}

	counts := make([]*model.InsightApplicationCount, len(c.Counts))
	for i := range c.Counts {
		counts[i] = &c.Counts[i]
	}

	return &webservice.GetInsightApplicationCountResponse{
		Counts:    counts,
		UpdatedAt: c.UpdatedAt,
	}, nil
}
| 1 | 23,160 | This TODO and this `sort` depend on what the UI looks like. So I think we should fix the UI before continuing to update this web API. For example, in case the UI flow is as below, this API will receive a Piped ID and then return just ones belonging to that Piped. - User picks where they want to deploy first (select one Piped and one cloud provider) - Then API will be called to show the list of suggested apps for that Piped and cloud provider - User picks app they want to create | pipe-cd-pipe | go
@@ -24,6 +24,16 @@ import (
"github.com/spf13/cobra"
)
+var (
+ snapshotrevertHelpText = `
+Usage: mayactl snapshot revert [options]
+
+$ mayactl snapshot revert --volname <vol> --snapname <snap>
+
+This command displays status of available snapshotrevert.
+`
+)
+
/*type CmdSnaphotCreateOptions struct {
volName string
snapName string | 1 | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
"fmt"
"github.com/openebs/maya/pkg/client/mapiserver"
"github.com/openebs/maya/pkg/util"
"github.com/spf13/cobra"
)
/*type CmdSnaphotCreateOptions struct {
volName string
snapName string
}*/
// NewCmdSnapshotRevert builds the "snapshot revert" command, which reverts an
// OpenEBS Volume to a specific snapshot. Both --volname and --snapname are
// required flags.
func NewCmdSnapshotRevert() *cobra.Command {
	options := CmdSnaphotCreateOptions{}

	cmd := &cobra.Command{
		Use:   "revert",
		Short: "Reverts to specific snapshot of a Volume",
		Long:  "Reverts to specific snapshot of a Volume",
		Run: func(cmd *cobra.Command, args []string) {
			util.CheckErr(options.Validate(cmd), util.Fatal)
			util.CheckErr(options.RunSnapshotRevert(cmd), util.Fatal)
		},
	}

	cmd.Flags().StringVarP(&options.volName, "volname", "n", options.volName,
		"unique volume name.")
	cmd.Flags().StringVarP(&options.snapName, "snapname", "s", options.snapName,
		"unique snapshot name")
	// The flags are registered on cmd.Flags(), not the persistent flag set, so
	// they must be marked with MarkFlagRequired. The previous code called
	// MarkPersistentFlagRequired (a no-op for local flags) and marked
	// "snapname" before the flag was even defined.
	cmd.MarkFlagRequired("volname")
	cmd.MarkFlagRequired("snapname")

	return cmd
}
// RunSnapshotRevert asks the maya API server to revert the volume to the
// configured snapshot and reports the outcome on stdout.
func (c *CmdSnaphotCreateOptions) RunSnapshotRevert(cmd *cobra.Command) error {
	fmt.Println("Executing volume snapshot revert ...")

	if resp := mapiserver.RevertSnapshot(c.volName, c.snapName); resp != nil {
		return fmt.Errorf("Error: %v", resp)
	}

	fmt.Printf("Reverting to snapshot [%s] of volume [%s]\n", c.snapName, c.volName)
	return nil
}
| 1 | 8,198 | This command rolls back the volume data to the specified snapshot. Once the roll back to snapshot is successful, all the data changes made after the snapshot was taken will be lost. This command should be used cautiously and only when there is an issue with the current state of the data. | openebs-maya | go
@@ -567,9 +567,12 @@ func TestAwslogsDriver(t *testing.T) {
})
}()
+ strs := strings.Split(*testTask.TaskArn, "/")
+ taskId := strs[len(strs)-1]
+
params := &cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(awslogsLogGroupName),
- LogStreamName: aws.String(containerId),
+ LogStreamName: aws.String(fmt.Sprintf("ecs-functional-tests/awslogs/%s", taskId)),
}
resp, err := cwlClient.GetLogEvents(params)
if err != nil { | 1 | // +build functional
// Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package functional_tests
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
. "github.com/aws/amazon-ecs-agent/agent/functional_tests/util"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
docker "github.com/fsouza/go-dockerclient"
"github.com/pborman/uuid"
)
const (
	// waitTaskStateChangeDuration is how long tests wait for a task to reach a new state.
	waitTaskStateChangeDuration = 2 * time.Minute
	// waitMetricsInCloudwatchDuration is how long tests wait for metrics to show up in CloudWatch.
	waitMetricsInCloudwatchDuration = 4 * time.Minute
	// awslogsLogGroupName is the CloudWatch Logs group used by the awslogs driver test.
	awslogsLogGroupName = "ecs-functional-tests"
)
// TestRunManyTasks runs several tasks in short succession and expects them to
// all run to completion with the expected exit code.
func TestRunManyTasks(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()

	numToRun := 15
	tasks := []*TestTask{}
	attemptsTaken := 0

	td, err := GetTaskDefinition("simple-exit")
	if err != nil {
		t.Fatalf("Get task definition error: %v", err)
	}
	// Start tasks in batches of up to 10 until numToRun have been started,
	// retrying batches that fail to start. (Removed the dead `numRun`
	// variable, which was incremented but never read.)
	for ; len(tasks) < numToRun; attemptsTaken++ {
		startNum := 10
		if numToRun-len(tasks) < 10 {
			startNum = numToRun - len(tasks)
		}
		startedTasks, err := agent.StartMultipleTasks(t, td, startNum)
		if err != nil {
			continue
		}
		tasks = append(tasks, startedTasks...)
	}
	t.Logf("Ran %v containers; took %v tries\n", numToRun, attemptsTaken)

	// Each container should exit with code 42 within the wait window.
	for _, task := range tasks {
		err := task.WaitStopped(10 * time.Minute)
		if err != nil {
			t.Error(err)
		}
		if code, ok := task.ContainerExitcode("exit"); !ok || code != 42 {
			t.Error("Wrong exit code")
		}
	}
}
// TestPullInvalidImage verifies that an invalid image returns an error
func TestPullInvalidImage(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()

	testTask, err := agent.StartTask(t, "invalid-image")
	if err != nil {
		t.Fatalf("Expected to start invalid-image task: %v", err)
	}

	// The agent should report a pull failure for the bogus image.
	err = testTask.ExpectErrorType("error", "CannotPullContainerError", 1*time.Minute)
	if err != nil {
		t.Error(err)
	}
}
// TestOOMContainer verifies that an OOM container returns an error
func TestOOMContainer(t *testing.T) {
	RequireDockerVersion(t, "<1.9.0,>1.9.1") // https://github.com/docker/docker/issues/18510
	agent := RunAgent(t, nil)
	defer agent.Cleanup()

	testTask, err := agent.StartTask(t, "oom-container")
	if err != nil {
		// Message fixed: it was copy-pasted from TestPullInvalidImage and
		// referred to the wrong task definition.
		t.Fatalf("Expected to start oom-container task: %v", err)
	}
	if err = testTask.ExpectErrorType("error", "OutOfMemoryError", 1*time.Minute); err != nil {
		t.Error(err)
	}
}
// This test addresses a deadlock issue which was noted in GH:313 and fixed
// in GH:320. It runs a task with 10 containers, waits for cleanup, starts
// further instances of that task and ensures that those tasks complete.
func TestTaskCleanupDoesNotDeadlock(t *testing.T) {
	// Set the ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION to its lowest permissible value
	os.Setenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "60s")
	defer os.Unsetenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION")

	agent := RunAgent(t, nil)
	defer agent.Cleanup()
	// This bug was fixed in v1.8.1
	agent.RequireVersion(">=1.8.1")

	// Run multiple cleanup cycles, as the deadlock does not consistently occur
	// after just one task cleanup cycle.
	for i := 0; i < 3; i++ {
		// Start a task with ten containers
		testTask, err := agent.StartTask(t, "ten-containers")
		if err != nil {
			t.Fatalf("Cycle %d: There was an error starting the Task: %v", i, err)
		}
		isTaskRunning, err := agent.WaitRunningViaIntrospection(testTask)
		if err != nil || !isTaskRunning {
			t.Fatalf("Cycle %d: Task should be RUNNING but is not: %v", i, err)
		}

		// Get the dockerID so we can later check that the container has been cleaned up.
		dockerId, err := agent.ResolveTaskDockerID(testTask, "1")
		if err != nil {
			t.Fatalf("Cycle %d: Error resolving docker id for container in task: %v", i, err)
		}

		// 2 minutes should be enough for the Task to have completed. If the task has not
		// completed and is in PENDING, the agent is most likely deadlocked.
		err = testTask.WaitStopped(2 * time.Minute)
		if err != nil {
			t.Fatalf("Cycle %d: Task did not transition into to STOPPED in time: %v", i, err)
		}
		isTaskStopped, err := agent.WaitStoppedViaIntrospection(testTask)
		if err != nil || !isTaskStopped {
			t.Fatalf("Cycle %d: Task should be STOPPED but is not: %v", i, err)
		}

		// Wait for the tasks to be cleaned up
		time.Sleep(90 * time.Second)

		// Ensure that tasks are cleaned up. We should not be able to describe the
		// container now since it has been cleaned up.
		_, err = agent.DockerClient.InspectContainer(dockerId)
		if err == nil {
			t.Fatalf("Cycle %d: Expected error inspecting container in task.", i)
		}
	}
}
// TestSavedState verifies that stopping the agent, stopping a container under
// its control, and starting the agent results in that container being moved to
// 'stopped'
func TestSavedState(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()

	testTask, err := agent.StartTask(t, "nginx")
	if err != nil {
		t.Fatal(err)
	}
	err = testTask.WaitRunning(1 * time.Minute)
	if err != nil {
		t.Fatal(err)
	}
	dockerId, err := agent.ResolveTaskDockerID(testTask, "nginx")
	if err != nil {
		t.Fatal(err)
	}

	// Stop the agent, stop the container behind its back, then restart the
	// agent; it must reconcile the container's state from its saved state.
	err = agent.StopAgent()
	if err != nil {
		t.Fatal(err)
	}
	err = agent.DockerClient.StopContainer(dockerId, 1)
	if err != nil {
		t.Fatal(err)
	}
	err = agent.StartAgent()
	if err != nil {
		t.Fatal(err)
	}

	// Fixed: the error from WaitStopped was previously ignored, so the test
	// could pass even if the task never transitioned to STOPPED.
	if err := testTask.WaitStopped(1 * time.Minute); err != nil {
		t.Fatal(err)
	}
}
// TestPortResourceContention verifies that running two tasks on the same port
// in quick-succession does not result in the second one failing to run. It
// verifies the 'seqnum' serialization stuff works.
func TestPortResourceContention(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()

	// First task claims the host port.
	testTask, err := agent.StartTask(t, "busybox-port-5180")
	if err != nil {
		t.Fatal(err)
	}
	err = testTask.WaitRunning(2 * time.Minute)
	if err != nil {
		t.Fatal(err)
	}
	err = testTask.Stop()
	if err != nil {
		t.Fatal(err)
	}

	// Immediately start a second task on the same port; it must eventually
	// run once the first task releases the port.
	testTask2, err := agent.StartTask(t, "busybox-port-5180")
	if err != nil {
		t.Fatal(err)
	}
	err = testTask2.WaitRunning(4 * time.Minute)
	if err != nil {
		t.Fatal(err)
	}
	testTask2.Stop()

	// Wait for both tasks to finish stopping (the first concurrently).
	go testTask.WaitStopped(2 * time.Minute)
	testTask2.WaitStopped(2 * time.Minute)
}
// strptr returns a pointer to a copy of the given string literal.
func strptr(s string) *string {
	v := s
	return &v
}
// TestCommandOverrides verifies that a container command override supplied at
// task start time takes effect (the overridden command exits with 21).
func TestCommandOverrides(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()

	overrides := []*ecs.ContainerOverride{
		{
			Name:    strptr("exit"),
			Command: []*string{strptr("sh"), strptr("-c"), strptr("exit 21")},
		},
	}
	task, err := agent.StartTaskWithOverrides(t, "simple-exit", overrides)
	if err != nil {
		t.Fatal(err)
	}

	if err := task.WaitStopped(2 * time.Minute); err != nil {
		t.Fatal(err)
	}
	if exitCode, _ := task.ContainerExitcode("exit"); exitCode != 21 {
		t.Errorf("Expected exit code of 21; got %v", exitCode)
	}
}
// TestLabels verifies that container labels declared in the task definition
// are applied to the created container: "label1" present with an empty value
// and "com.foo.label2" set to "value".
func TestLabels(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()
	agent.RequireVersion(">=1.5.0")

	task, err := agent.StartTask(t, "labels")
	if err != nil {
		t.Fatal(err)
	}
	err = task.WaitStopped(2 * time.Minute)
	if err != nil {
		t.Fatal(err)
	}

	dockerId, err := agent.ResolveTaskDockerID(task, "labeled")
	if err != nil {
		t.Fatal(err)
	}
	container, err := agent.DockerClient.InspectContainer(dockerId)
	if err != nil {
		t.Fatal(err)
	}

	// Use the comma-ok form so a missing "label1" is distinguished from one
	// present with an empty value; the previous plain lookup could not tell
	// the two apart.
	labels := container.Config.Labels
	if v, ok := labels["label1"]; !ok || v != "" {
		t.Fatalf("Labels did not match expected; expected to contain label1:<empty> and com.foo.label2:value, got %v", labels)
	}
	if labels["com.foo.label2"] != "value" {
		t.Fatalf("Labels did not match expected; expected to contain label1:<empty> and com.foo.label2:value, got %v", labels)
	}
}
// TestLogdriverOptions verifies that the log driver type and options from the
// task definition are applied to the created container.
func TestLogdriverOptions(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()
	agent.RequireVersion(">=1.5.0")

	task, err := agent.StartTask(t, "logdriver-jsonfile")
	if err != nil {
		t.Fatal(err)
	}
	if err = task.WaitStopped(2 * time.Minute); err != nil {
		t.Fatal(err)
	}

	dockerId, err := agent.ResolveTaskDockerID(task, "exit")
	if err != nil {
		t.Fatal(err)
	}
	container, err := agent.DockerClient.InspectContainer(dockerId)
	if err != nil {
		t.Fatal(err)
	}

	logConfig := container.HostConfig.LogConfig
	if logConfig.Type != "json-file" {
		t.Errorf("Expected json-file type logconfig, was %v", logConfig.Type)
	}
	wantOptions := map[string]string{"max-file": "50", "max-size": "50k"}
	if !reflect.DeepEqual(wantOptions, logConfig.Config) {
		t.Errorf("Expected max-file:50 max-size:50k for logconfig options, got %v", logConfig.Config)
	}
}
// TestDockerAuth verifies that the agent can pull from a registry requiring
// dockercfg auth, and that the credentials never appear in the agent's logs
// in any representation (plain, %v byte slice, or %#v byte hex).
func TestDockerAuth(t *testing.T) {
	agent := RunAgent(t, &AgentOptions{
		ExtraEnvironment: map[string]string{
			"ECS_ENGINE_AUTH_TYPE": "dockercfg",
			"ECS_ENGINE_AUTH_DATA": `{"127.0.0.1:51671":{"auth":"dXNlcjpzd29yZGZpc2g=","email":"[email protected]"}}`, // user:swordfish
		},
	})
	defer agent.Cleanup()

	task, err := agent.StartTask(t, "simple-exit-authed")
	if err != nil {
		t.Fatal(err)
	}
	err = task.WaitStopped(2 * time.Minute)
	if err != nil {
		t.Fatal(err)
	}
	if exitCode, _ := task.ContainerExitcode("exit"); exitCode != 42 {
		t.Errorf("Expected exit code of 42; got %v", exitCode)
	}
	// verify there's no sign of auth details in the config; action item taken as
	// a result of accidentally logging them once
	logdir := agent.Logdir
	badStrings := []string{"user:swordfish", "swordfish", "dXNlcjpzd29yZGZpc2g="}
	err = filepath.Walk(logdir, func(path string, info os.FileInfo, err error) error {
		if info.IsDir() {
			return nil
		}
		data, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}
		for _, badstring := range badStrings {
			// Plain substring match.
			if strings.Contains(string(data), badstring) {
				t.Fatalf("log data contained bad string: %v, %v", string(data), badstring)
			}
			// %v rendering of the credential as a byte slice, e.g. "[115 119 ...]".
			if strings.Contains(string(data), fmt.Sprintf("%v", []byte(badstring))) {
				t.Fatalf("log data contained byte-slice representation of bad string: %v, %v", string(data), badstring)
			}
			gobytes := fmt.Sprintf("%#v", []byte(badstring))
			// format is []byte{0x12, 0x34}
			// if it were json.RawMessage or another alias, it would print as json.RawMessage ... in the log
			// Because of this, strip down to just the comma-separated hex and look for that
			if strings.Contains(string(data), gobytes[len(`[]byte{`):len(gobytes)-1]) {
				t.Fatalf("log data contained byte-hex representation of bad string: %v, %v", string(data), badstring)
			}
		}
		return nil
	})
	if err != nil {
		t.Errorf("Could not walk logdir: %v", err)
	}
}
// TestSquidProxy starts a squid proxy container manually, runs the agent with
// HTTP_PROXY pointing at it, and then inspects squid's access log to verify
// that the agent's backend connections actually went through the proxy.
func TestSquidProxy(t *testing.T) {
	// Run a squid proxy manually, verify that the agent can connect through it
	client, err := docker.NewVersionedClientFromEnv("1.17")
	if err != nil {
		t.Fatal(err)
	}

	// Launch the squid image from the local test registry.
	dockerConfig := docker.Config{
		Image: "127.0.0.1:51670/amazon/squid:latest",
	}
	dockerHostConfig := docker.HostConfig{}
	squidContainer, err := client.CreateContainer(docker.CreateContainerOptions{
		Config:     &dockerConfig,
		HostConfig: &dockerHostConfig,
	})
	if err != nil {
		t.Fatal(err)
	}
	if err := client.StartContainer(squidContainer.ID, &dockerHostConfig); err != nil {
		t.Fatal(err)
	}
	defer func() {
		client.RemoveContainer(docker.RemoveContainerOptions{
			Force:         true,
			ID:            squidContainer.ID,
			RemoveVolumes: true,
		})
	}()

	// Resolve the name so we can use it in the link below; the create returns an ID only
	squidContainer, err = client.InspectContainer(squidContainer.ID)
	if err != nil {
		t.Fatal(err)
	}
	// Squid startup time
	time.Sleep(1 * time.Second)
	t.Logf("Started squid container: %v", squidContainer.Name)

	// Run the agent with the proxy configured; metadata service and the docker
	// socket are excluded via NO_PROXY.
	agent := RunAgent(t, &AgentOptions{
		ExtraEnvironment: map[string]string{
			"HTTP_PROXY": "squid:3128",
			"NO_PROXY":   "169.254.169.254,/var/run/docker.sock",
		},
		ContainerLinks: []string{squidContainer.Name + ":squid"},
	})
	defer agent.Cleanup()
	agent.RequireVersion(">1.5.0")
	task, err := agent.StartTask(t, "simple-exit")
	if err != nil {
		t.Fatal(err)
	}
	// Verify the agent can run a container using the proxy
	task.WaitStopped(1 * time.Minute)

	// stop the agent, thus forcing it to close its connections; this is needed
	// because squid's access logs are written on DC not connect
	err = agent.StopAgent()
	if err != nil {
		t.Fatal(err)
	}
	// Now verify it actually used the proxy via squids access logs. Get all the
	// unique addresses that squid proxied for (assume nothing else used the
	// proxy).
	// This should be '3' currently, for example I see the following at the time of writing
	//     ecs.us-west-2.amazonaws.com:443
	//     ecs-a-1.us-west-2.amazonaws.com:443
	//     ecs-t-1.us-west-2.amazonaws.com:443
	// Note, it connects multiple times to the first one which is an
	// implementation detail we might change/optimize, intentionally dedupe so
	// we're not tied to that sorta thing
	// Note, do a docker exec instead of bindmount the logs out because the logs
	// will not be permissioned correctly in the bindmount. Once we have proper
	// user namespacing we could revisit this
	logExec, err := client.CreateExec(docker.CreateExecOptions{
		AttachStdout: true,
		AttachStdin:  false,
		Container:    squidContainer.ID,
		// Takes a second to flush the file sometimes, so slightly complicated command to wait for it to be written
		Cmd: []string{"sh", "-c", "FILE=/var/log/squid/access.log; while [ ! -s $FILE ]; do sleep 1; done; cat $FILE"},
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("Execing cat of /var/log/squid/access.log on %v", squidContainer.ID)
	var squidLogs bytes.Buffer
	err = client.StartExec(logExec.ID, docker.StartExecOptions{
		OutputStream: &squidLogs,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Poll until the exec finishes so squidLogs is fully populated.
	for {
		tmp, _ := client.InspectExec(logExec.ID)
		if !tmp.Running {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Logf("Squid logs: %v", squidLogs.String())

	// Access log lines are of the format:
	//   1445018173.730  3163 10.0.0.1 TCP_MISS/200 5706 CONNECT ecs.us-west-2.amazonaws.com:443 - HIER_DIRECT/54.240.250.253 -
	allAddressesRegex := regexp.MustCompile("CONNECT [^ ]+ ")
	// Match just the host+port it's proxying to
	matches := allAddressesRegex.FindAllStringSubmatch(squidLogs.String(), -1)
	t.Logf("Proxy connections: %v", matches)
	dedupedMatches := map[string]struct{}{}
	for _, match := range matches {
		dedupedMatches[match[0]] = struct{}{}
	}

	if len(dedupedMatches) < 3 {
		t.Errorf("Expected 3 matches, actually had %d matches: %+v", len(dedupedMatches), dedupedMatches)
	}
}
// TestAwslogsDriver verifies that container logs are sent to Amazon CloudWatch Logs with awslogs as the log driver
func TestAwslogsDriver(t *testing.T) {
	RequireDockerVersion(t, ">=1.9.0") // awslogs drivers available from docker 1.9.0
	cwlClient := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*ECS.Config.Region))

	// Test whether the log group existed or not; create it if missing.
	respDescribeLogGroups, err := cwlClient.DescribeLogGroups(&cloudwatchlogs.DescribeLogGroupsInput{
		LogGroupNamePrefix: aws.String(awslogsLogGroupName),
	})
	if err != nil {
		t.Fatalf("CloudWatchLogs describe log groups error: %v", err)
	}
	logGroupExists := false
	for i := 0; i < len(respDescribeLogGroups.LogGroups); i++ {
		if *respDescribeLogGroups.LogGroups[i].LogGroupName == awslogsLogGroupName {
			logGroupExists = true
			break
		}
	}
	if !logGroupExists {
		_, err := cwlClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
			LogGroupName: aws.String(awslogsLogGroupName),
		})
		if err != nil {
			t.Fatalf("Failed to create log group %s : %v", awslogsLogGroupName, err)
		}
	}

	// The agent must whitelist the awslogs driver for tasks to be able to use it.
	agentOptions := AgentOptions{
		ExtraEnvironment: map[string]string{
			"ECS_AVAILABLE_LOGGING_DRIVERS": `["awslogs"]`,
		},
	}
	agent := RunAgent(t, &agentOptions)
	defer agent.Cleanup()
	agent.RequireVersion(">=1.9.0") //Required for awslogs driver

	tdOverrides := make(map[string]string)
	tdOverrides["$$$TEST_REGION$$$"] = *ECS.Config.Region
	testTask, err := agent.StartTaskWithTaskDefinitionOverrides(t, "awslogs", tdOverrides)
	if err != nil {
		t.Fatalf("Expected to start task using awslogs driver failed: %v", err)
	}

	// Wait for the container to start
	testTask.WaitRunning(waitTaskStateChangeDuration)
	containerId, err := agent.ResolveTaskDockerID(testTask, "awslogs")
	if err != nil {
		t.Fatalf("Failed to get the container ID")
	}

	// NOTE(review): this assumes the log stream name equals the container ID.
	// If the "awslogs" task definition sets awslogs-stream-prefix, the stream
	// name will instead be "<prefix>/<container-name>/<task-id>" — verify
	// against the task definition before relying on this.
	// Delete the log stream after the test
	defer func() {
		cwlClient.DeleteLogStream(&cloudwatchlogs.DeleteLogStreamInput{
			LogGroupName:  aws.String(awslogsLogGroupName),
			LogStreamName: aws.String(containerId),
		})
	}()

	params := &cloudwatchlogs.GetLogEventsInput{
		LogGroupName:  aws.String(awslogsLogGroupName),
		LogStreamName: aws.String(containerId),
	}
	resp, err := cwlClient.GetLogEvents(params)
	if err != nil {
		t.Fatalf("CloudWatchLogs get log failed: %v", err)
	}

	// The task emits exactly one "hello world" log line.
	if len(resp.Events) != 1 {
		t.Errorf("Get unexpected number of log events: %d", len(resp.Events))
	} else if *resp.Events[0].Message != "hello world" {
		t.Errorf("Got log events message unexpected: %s", *resp.Events[0].Message)
	}
}
// TestTaskCleanup verifies that a stopped task's container is removed from
// Docker after the configured cleanup wait duration elapses.
func TestTaskCleanup(t *testing.T) {
	// Set the task cleanup time to just over a minute.
	os.Setenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "70s")
	agent := RunAgent(t, nil)
	defer func() {
		agent.Cleanup()
		os.Unsetenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION")
	}()

	// Start a task and get the container id once the task transitions to RUNNING.
	task, err := agent.StartTask(t, "nginx")
	if err != nil {
		t.Fatalf("Error starting task: %v", err)
	}
	err = task.WaitRunning(2 * time.Minute)
	if err != nil {
		t.Fatalf("Error waiting for running task: %v", err)
	}
	dockerId, err := agent.ResolveTaskDockerID(task, "nginx")
	if err != nil {
		t.Fatalf("Error resolving docker id for container in task: %v", err)
	}

	// We should be able to inspect the container ID from docker at this point.
	_, err = agent.DockerClient.InspectContainer(dockerId)
	if err != nil {
		t.Fatalf("Error inspecting container in task: %v", err)
	}

	// Stop the task and sleep for 2 minutes to let the task be cleaned up.
	err = agent.DockerClient.StopContainer(dockerId, 1)
	if err != nil {
		// Fixed typo in this message ("stoppping" -> "stopping").
		t.Fatalf("Error stopping task: %v", err)
	}
	err = task.WaitStopped(1 * time.Minute)
	if err != nil {
		t.Fatalf("Error waiting for task stopped: %v", err)
	}
	time.Sleep(2 * time.Minute)

	// We should not be able to describe the container now since it has been cleaned up.
	_, err = agent.DockerClient.InspectContainer(dockerId)
	if err == nil {
		t.Fatalf("Expected error inspecting container in task")
	}
}
// TestTelemetry tests whether agent can send metrics to TACS
//
// It checks CPU/memory utilization metrics for a dedicated throwaway cluster
// in three phases: before any task runs, while the telemetry task runs, and
// after the task stops.
func TestTelemetry(t *testing.T) {
	// Try to use a new cluster for this test, ensure no other task metrics for this cluster
	newClusterName := "ecstest-telemetry-" + uuid.New()
	_, err := ECS.CreateCluster(&ecs.CreateClusterInput{
		ClusterName: aws.String(newClusterName),
	})
	if err != nil {
		t.Fatalf("Failed to create cluster %s : %v", newClusterName, err)
	}
	defer DeleteCluster(t, newClusterName)

	agentOptions := AgentOptions{
		ExtraEnvironment: map[string]string{
			"ECS_CLUSTER": newClusterName,
		},
	}
	agent := RunAgent(t, &agentOptions)
	defer agent.Cleanup()

	// Query parameters shared by all phases; MetricName and the time window
	// are mutated between phases.
	params := &cloudwatch.GetMetricStatisticsInput{
		MetricName: aws.String("CPUUtilization"),
		Namespace:  aws.String("AWS/ECS"),
		Period:     aws.Int64(60),
		Statistics: []*string{
			aws.String("Average"),
			aws.String("SampleCount"),
		},
		Dimensions: []*cloudwatch.Dimension{
			{
				Name:  aws.String("ClusterName"),
				Value: aws.String(newClusterName),
			},
		},
	}
	params.StartTime = aws.Time(RoundTimeUp(time.Now(), time.Minute).UTC())
	params.EndTime = aws.Time((*params.StartTime).Add(waitMetricsInCloudwatchDuration).UTC())
	// wait for the agent start and ensure no task is running
	time.Sleep(waitMetricsInCloudwatchDuration)

	cwclient := cloudwatch.New(session.New(), aws.NewConfig().WithRegion(*ECS.Config.Region))
	// Phase 1: idle cluster. VerifyMetrics is called with true here and false
	// while the task runs below — presumably true means "expect no metrics";
	// confirm against VerifyMetrics' definition.
	if err = VerifyMetrics(cwclient, params, true); err != nil {
		t.Errorf("Before task running, verify metrics for CPU utilization failed: %v", err)
	}
	params.MetricName = aws.String("MemoryUtilization")
	if err = VerifyMetrics(cwclient, params, true); err != nil {
		t.Errorf("Before task running, verify metrics for memory utilization failed: %v", err)
	}

	testTask, err := agent.StartTask(t, "telemetry")
	if err != nil {
		t.Fatalf("Expected to start telemetry task: %v", err)
	}
	// Wait for the task to run and the agent to send back metrics
	err = testTask.WaitRunning(waitTaskStateChangeDuration)
	if err != nil {
		t.Fatalf("Error start telemetry task: %v", err)
	}
	time.Sleep(waitMetricsInCloudwatchDuration)

	// Phase 2: task running; shift the query window to the recent interval.
	params.EndTime = aws.Time(RoundTimeUp(time.Now(), time.Minute).UTC())
	params.StartTime = aws.Time((*params.EndTime).Add(-waitMetricsInCloudwatchDuration).UTC())
	params.MetricName = aws.String("CPUUtilization")
	if err = VerifyMetrics(cwclient, params, false); err != nil {
		t.Errorf("Task is running, verify metrics for CPU utilization failed: %v", err)
	}
	params.MetricName = aws.String("MemoryUtilization")
	if err = VerifyMetrics(cwclient, params, false); err != nil {
		t.Errorf("Task is running, verify metrics for memory utilization failed: %v", err)
	}

	err = testTask.Stop()
	if err != nil {
		t.Fatalf("Failed to stop the telemetry task: %v", err)
	}
	err = testTask.WaitStopped(waitTaskStateChangeDuration)
	if err != nil {
		t.Fatalf("Waiting for task stop error: %v", err)
	}
	time.Sleep(waitMetricsInCloudwatchDuration)

	// Phase 3: task stopped; the cluster should look idle again.
	params.EndTime = aws.Time(RoundTimeUp(time.Now(), time.Minute).UTC())
	params.StartTime = aws.Time((*params.EndTime).Add(-waitMetricsInCloudwatchDuration).UTC())
	params.MetricName = aws.String("CPUUtilization")
	if err = VerifyMetrics(cwclient, params, true); err != nil {
		t.Errorf("Task stopped: verify metrics for CPU utilization failed: %v", err)
	}
	params.MetricName = aws.String("MemoryUtilization")
	if err = VerifyMetrics(cwclient, params, true); err != nil {
		t.Errorf("Task stopped, verify metrics for memory utilization failed: %v", err)
	}
}
// TestTaskIamRoles verifies that a task configured with an IAM role gets
// credentials exposed via AWS_CONTAINER_CREDENTIALS_RELATIVE_URI and that the
// credential request is recorded in the agent's audit log.
func TestTaskIamRoles(t *testing.T) {
	// The test runs only when the environment variable TEST_TASK_IAM_ROLE is set to "true".
	if os.Getenv("TEST_TASK_IAM_ROLE") != "true" {
		// Message fixed to reference the env var actually checked.
		t.Skip("Skipping test TaskIamRole, as TEST_TASK_IAM_ROLE isn't set true")
	}
	roleArn := os.Getenv("TASK_IAM_ROLE_ARN")
	if utils.ZeroOrNil(roleArn) {
		t.Logf("TASK_IAM_ROLE_ARN not set, will try to use the role attached to instance profile")
		roles, err := GetInstanceIAMRole()
		if err != nil {
			t.Fatalf("Error getting IAM Roles from instance profile, err: %v", err)
		}
		roleArn = *roles[0].Arn
	}

	agentOptions := &AgentOptions{
		ExtraEnvironment: map[string]string{
			"ECS_ENABLE_TASK_IAM_ROLE": "true",
		},
		PortBindings: map[docker.Port]map[string]string{
			"51679/tcp": map[string]string{
				"HostIP":   "0.0.0.0",
				"HostPort": "51679",
			},
		},
	}
	agent := RunAgent(t, agentOptions)
	defer agent.Cleanup()

	tdOverride := make(map[string]string)
	tdOverride["$$$TASK_ROLE$$$"] = roleArn
	tdOverride["$$$TEST_REGION$$$"] = *ECS.Config.Region
	task, err := agent.StartTaskWithTaskDefinitionOverrides(t, "iam-roles", tdOverride)
	if err != nil {
		t.Fatalf("Error start iam-roles task: %v", err)
	}
	err = task.WaitRunning(waitTaskStateChangeDuration)
	if err != nil {
		t.Fatalf("Error waiting for task to run: %v", err)
	}
	containerId, err := agent.ResolveTaskDockerID(task, "container-with-iamrole")
	if err != nil {
		t.Fatalf("Error resolving docker id for container in task: %v", err)
	}

	// A container with task IAM roles enabled should have the environment
	// variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI set.
	containerMetaData, err := agent.DockerClient.InspectContainer(containerId)
	if err != nil {
		t.Fatalf("Could not inspect container for task: %v", err)
	}
	iamRoleEnabled := false
	if containerMetaData.Config != nil {
		for _, env := range containerMetaData.Config.Env {
			if strings.HasPrefix(env, "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI=") {
				iamRoleEnabled = true
				break
			}
		}
	}
	if !iamRoleEnabled {
		task.Stop()
		// Message fixed: grammar ("Could not found") and spelling ("envrionment").
		t.Fatalf("Could not find AWS_CONTAINER_CREDENTIALS_RELATIVE_URI in the container environment variables")
	}

	// Task will only run one command "aws ec2 describe-regions"
	err = task.WaitStopped(30 * time.Second)
	if err != nil {
		t.Fatalf("Waiting task to stop error : %v", err)
	}
	containerMetaData, err = agent.DockerClient.InspectContainer(containerId)
	if err != nil {
		t.Fatalf("Could not inspect container for task: %v", err)
	}
	if containerMetaData.State.ExitCode != 0 {
		t.Fatalf("Container exit code non-zero: %v", containerMetaData.State.ExitCode)
	}

	// Search the audit log to verify the credential request
	err = SearchStrInDir(filepath.Join(agent.TestDir, "log"), "audit.log.", *task.TaskArn)
	if err != nil {
		t.Fatalf("Verify credential request failed, err: %v", err)
	}
}
// TestMemoryOvercommit tests the MemoryReservation of container can be configured in task definition
func TestMemoryOvercommit(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()

	// 50 MiB soft limit, substituted into the task definition template.
	var memoryReservation int64 = 50
	tdOverride := map[string]string{
		"$$$$MEMORY_RESERVATION$$$$": strconv.FormatInt(memoryReservation, 10),
	}

	task, err := agent.StartTaskWithTaskDefinitionOverrides(t, "memory-overcommit", tdOverride)
	if err != nil {
		t.Fatalf("Error starting task: %v", err)
	}
	defer task.Stop()

	if err = task.WaitRunning(waitTaskStateChangeDuration); err != nil {
		t.Fatalf("Error waiting for running task: %v", err)
	}

	containerID, err := agent.ResolveTaskDockerID(task, "memory-overcommit")
	if err != nil {
		t.Fatalf("Error resolving docker id for container in task: %v", err)
	}
	containerMetaData, err := agent.DockerClient.InspectContainer(containerID)
	if err != nil {
		t.Fatalf("Could not inspect container for task: %v", err)
	}

	// The task definition specifies MiB; docker reports bytes.
	expected := memoryReservation * 1024 * 1024
	if containerMetaData.HostConfig.MemoryReservation != expected {
		t.Fatalf("MemoryReservation in container metadata is not as expected: %v, expected: %v", containerMetaData.HostConfig.MemoryReservation, expected)
	}
}
// TestNetworkModeHost tests the container network can be configured
// as host mode in task definition
func TestNetworkModeHost(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()

	err := networkModeTest(t, agent, "host")
	if err != nil {
		t.Fatalf("Networking mode host testing failed, err: %v", err)
	}
}
// TestNetworkModeNone tests the container network can be configured
// as none mode in task definition
func TestNetworkModeNone(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()

	err := networkModeTest(t, agent, "none")
	if err != nil {
		t.Fatalf("Networking mode none testing failed, err: %v", err)
	}
}
// TestNetworkModeBridge tests the container network can be configured
// as bridge mode in task definition
func TestNetworkModeBridge(t *testing.T) {
	agent := RunAgent(t, nil)
	defer agent.Cleanup()

	// Delegate to the shared helper with the bridge network mode.
	if err := networkModeTest(t, agent, "bridge"); err != nil {
		t.Fatalf("Networking mode bridge testing failed, err: %v", err)
	}
}
// networkModeTest verifies that a container started from the "network-mode"
// task definition is attached to exactly the requested docker network mode
// ("host", "none", or "bridge"). It returns a non-nil error describing the
// first failure, leaving the t.Fatalf decision to the caller.
func networkModeTest(t *testing.T, agent *TestAgent, mode string) error {
	// Substitute the requested network mode into the task definition template.
	tdOverride := make(map[string]string)
	tdOverride["$$$$NETWORK_MODE$$$$"] = mode
	task, err := agent.StartTaskWithTaskDefinitionOverrides(t, "network-mode", tdOverride)
	if err != nil {
		return fmt.Errorf("error starting task with network %v, err: %v", mode, err)
	}
	defer task.Stop()
	err = task.WaitRunning(waitTaskStateChangeDuration)
	if err != nil {
		return fmt.Errorf("error waiting for task running, err: %v", err)
	}

	// The container in the task definition is named after its network mode.
	containerId, err := agent.ResolveTaskDockerID(task, "network-"+mode)
	if err != nil {
		return fmt.Errorf("error resolving docker id for container %q: %v", "network-"+mode, err)
	}

	networks, err := agent.GetContainerNetworkMode(containerId)
	if err != nil {
		return err
	}
	if len(networks) != 1 {
		return fmt.Errorf("expected exactly one network in container config, found %d", len(networks))
	}
	if networks[0] != mode {
		return fmt.Errorf("expected network mode %q, found %q", mode, networks[0])
	}
	return nil
}
| 1 | 14,265 | should this be a separate/new functional test? | aws-amazon-ecs-agent | go |
@@ -458,6 +458,19 @@ describe Mongoid::QueryCache do
end
end
+ context 'when querying colleciton larger than the batch size' do
+ before do
+ 101.times { Band.create! }
+ end
+
+ it 'does not raise an exception when querying multiple times' do
+ expect do
+ Band.all.to_a
+ Band.all.to_a
+ end.not_to raise_error
+ end
+ end
+
context "when query caching is enabled and the batch_size is set" do
around(:each) do |example| | 1 | # frozen_string_literal: true
# encoding: utf-8
require "spec_helper"
describe Mongoid::QueryCache do
around do |spec|
Mongoid::QueryCache.clear_cache
Mongoid::QueryCache.cache { spec.run }
end
before(:all) do
# It is likely that there are other session leaks in the driver
# and/or Mongoid that are unrelated to the query cache. Clear the
# SessionRegistry at the start of these tests in order to detect leaks that
# occur only within the scope of these tests.
#
# Other session leaks will be detected and addressed as part of RUBY-2391.
SessionRegistry.instance.clear_registry
end
after do
SessionRegistry.instance.verify_sessions_ended!
end
context 'when iterating over objects sharing the same base' do
let(:server) do
relations.first.mongo_client.cluster.next_primary
end
before do
person = Person.create
3.times do
person.send(relation).create
end
person.save
end
let!(:relations) do
Person.first.send(relation).to_a
end
context 'when the association is has-many' do
let(:relation) do
:posts
end
context 'does not query for the relation and instead sets the base' do
before do
Mongoid::QueryCache.enabled = false
end
it 'queries for each access to the base' do
expect(server).to receive(:with_connection).exactly(0).times.and_call_original
relations.each do |object|
object.person
end
end
end
end
context 'when the association is embeds-many' do
let(:relation) do
:symptoms
end
context 'when query cache is disabled' do
before do
Mongoid::QueryCache.enabled = false
end
it 'does not query for access to the base' do
expect(server).to receive(:context).exactly(0).times.and_call_original
relations.each do |object|
object.person
end
end
end
context 'when query cache is enabled' do
before do
Mongoid::QueryCache.enabled = true
end
it 'does not query for access to the base' do
expect(server).to receive(:context).exactly(0).times.and_call_original
relations.each do |object|
object.person
end
end
end
end
end
context 'when driver query cache exists' do
require_driver_query_cache
before do
Band.all.to_a
Band.create!
end
it 'recognizes the driver query cache' do
expect(defined?(Mongo::QueryCache)).to_not be_nil
end
context 'when query cache enabled' do
it 'uses the driver query cache' do
expect(Mongo::QueryCache).to receive(:enabled=).and_call_original
Mongoid::QueryCache.enabled = true
expect(Mongoid::QueryCache.enabled?).to be(true)
expect(Mongo::QueryCache.enabled?).to be(true)
end
end
context 'when query cache disabled' do
it 'uses the driver query cache' do
expect(Mongo::QueryCache).to receive(:enabled=).and_call_original
Mongoid::QueryCache.enabled = false
expect(Mongoid::QueryCache.enabled?).to be(false)
expect(Mongo::QueryCache.enabled?).to be(false)
end
end
context 'when block is cached' do
before do
Mongoid::QueryCache.enabled = false
end
it 'uses the driver query cache' do
expect(Mongo::QueryCache).to receive(:cache).and_call_original
Mongoid::QueryCache.cache do
expect(Mongo::QueryCache).to receive(:enabled?).exactly(2).and_call_original
expect(Mongoid::QueryCache.enabled?).to be(true)
expect(Mongo::QueryCache.enabled?).to be(true)
end
end
end
context 'when block is uncached' do
before do
Mongoid::QueryCache.enabled = true
end
it 'uses the driver query cache' do
expect(Mongo::QueryCache).to receive(:uncached).and_call_original
Mongoid::QueryCache.uncached do
expect(Mongo::QueryCache).to receive(:enabled?).exactly(2).and_call_original
expect(Mongoid::QueryCache.enabled?).to be(false)
expect(Mongo::QueryCache.enabled?).to be(false)
end
end
end
context 'when clear_cache is used' do
before do
Band.all.to_a
end
it 'has a nonempty query cache' do
expect(Mongoid::QueryCache.cache_table.count).to eq(1)
expect(Mongo::QueryCache.cache_table.count).to eq(1)
end
it 'uses the driver query cache' do
expect(Mongo::QueryCache).to receive(:clear_cache).and_call_original
Mongoid::QueryCache.clear_cache
expect(Mongoid::QueryCache.cache_table.count).to eq(0)
expect(Mongo::QueryCache.cache_table.count).to eq(0)
end
end
context 'when query cache used and cleared' do
it 'uses the driver query cache' do
expect_query(1) do
Band.all.to_a
Band.all.to_a
end
expect(Mongo::QueryCache).to receive(:cache_table).exactly(2).and_call_original
expect(Mongoid::QueryCache.cache_table.count).to eq(1)
expect(Mongo::QueryCache.cache_table.count).to eq(1)
end
end
end
context 'when drivers query cache does not exist' do
require_mongoid_query_cache
it 'does not recognize the driver query cache' do
expect(defined?(Mongo::QueryCache)).to be_nil
end
end
context "when querying for a single document" do
[ :first, :one, :last ].each do |method|
before do
Band.all.send(method)
end
context "when query cache is disabled" do
before do
Mongoid::QueryCache.enabled = false
end
it "queries again" do
expect_query(1) do
Band.all.send(method)
end
end
end
context "with same selector" do
it "does not query again" do
expect_no_queries do
Band.all.send(method)
end
end
end
context "with different selector" do
it "queries again" do
expect_query(1) do
Band.where(id: 1).send(method)
end
end
end
end
end
context 'querying all documents after a single document' do
before do
3.times do
Person.create
end
end
it 'returns all documents' do
expect(Person.all.to_a.count).to eq(3)
Person.first
expect(Person.all.to_a.count).to eq(3)
end
context 'with conditions specified' do
it 'returns all documents' do
expect(Person.gt(age: 0).to_a.count).to eq(3)
Person.gt(age: 0).first
expect(Person.gt(age: 0).to_a.count).to eq(3)
end
end
context 'with order specified' do
it 'returns all documents' do
expect(Person.order_by(name: 1).to_a.count).to eq(3)
Person.order_by(name: 1).first
expect(Person.order_by(name: 1).to_a.count).to eq(3)
end
end
end
context "when querying in the same collection" do
before do
Band.all.to_a
end
context "when query cache is disabled" do
before do
Mongoid::QueryCache.enabled = false
end
it "queries again" do
expect_query(1) do
Band.all.to_a
end
end
end
context "with same selector" do
it "does not query again" do
expect_no_queries do
Band.all.to_a
end
end
context 'when the first query has a collation' do
min_server_version '3.4'
before do
Band.where(name: 'DEPECHE MODE').collation(locale: 'en_US', strength: 2).to_a
end
context "when the next query has the same collation" do
it "uses the cache" do
expect_no_queries do
Band.where(name: 'DEPECHE MODE').collation(locale: 'en_US', strength: 2).to_a
end
end
end
context "when the next query does not have the same collation" do
it "queries again" do
expect_query(1) do
Band.where(name: 'DEPECHE MODE').collation(locale: 'fr', strength: 2).to_a
end
end
end
context "when the next query does not have a collation" do
it "queries again" do
expect_query(1) do
Band.where(name: 'DEPECHE MODE').to_a
end
end
end
end
context "when the first query has no limit" do
let(:game) do
Game.create!(name: "2048")
end
before do
game.ratings.where(:value.gt => 5).asc(:id).all.to_a
end
context "when the next query has a limit" do
it "uses the cache" do
expect_no_queries do
game.ratings.where(:value.gt => 5).limit(2).asc(:id).to_a
end
end
end
end
context "when the first query has a limit" do
let(:game) do
Game.create!(name: "2048")
end
before do
game.ratings.where(:value.gt => 5).limit(3).asc(:id).all.to_a
end
context "when the next query has a limit" do
it "queries again" do
expect_query(1) do
game.ratings.where(:value.gt => 5).limit(2).asc(:id).to_a
end
end
end
context "when the new query does not have a limit" do
it "queries again" do
expect_query(1) do
game.ratings.where(:value.gt => 5).asc(:id).to_a
end
end
end
end
context "when querying only the first" do
let(:game) do
Game.create!(name: "2048")
end
before do
game.ratings.where(:value.gt => 5).asc(:id).all.to_a
end
it "does not query again" do
expect_no_queries do
game.ratings.where(:value.gt => 5).asc(:id).first
end
end
end
context "when limiting the result" do
it "does not query again" do
expect_query(0) do
Band.limit(2).all.to_a
end
end
end
context "when specifying a different skip value" do
before do
Band.limit(2).skip(1).all.to_a
end
it "queries again" do
expect_query(1) do
Band.limit(2).skip(3).all.to_a
end
end
end
end
context "with different selector" do
it "queries again" do
expect_query(1) do
Band.where(id: 1).to_a
end
end
end
context "when sorting documents" do
before do
Band.asc(:id).to_a
end
context "with different selector" do
it "queries again" do
expect_query(1) do
Band.desc(:id).to_a
end
end
end
it "does not query again" do
expect_query(0) do
Band.asc(:id).to_a
end
end
end
context "when query caching is enabled and the batch_size is set" do
around(:each) do |example|
query_cache_enabled = Mongoid::QueryCache.enabled?
Mongoid::QueryCache.enabled = true
example.run
Mongoid::QueryCache.enabled = query_cache_enabled
end
it "does not raise an error when requesting the second batch" do
expect {
Band.batch_size(4).where(:views.gte => 0).each do |doc|
doc.set(likes: Random.rand(100))
end
}.not_to raise_error
end
end
end
context "when querying in different collection" do
before do
Person.all.to_a
end
it "queries again" do
expect_query(1) do
Band.all.to_a
end
end
end
context "when inserting a new document" do
before do
Band.all.to_a
Band.create!
end
it "queries again" do
expect_query(1) do
Band.all.to_a
end
end
end
context "when deleting all documents" do
before do
Band.create!
Band.all.to_a
Band.delete_all
end
it "queries again" do
expect_query(1) do
Band.all.to_a
end
end
end
context "when destroying all documents" do
before do
Band.create!
Band.all.to_a
Band.destroy_all
end
it "queries again" do
expect_query(1) do
Band.all.to_a
end
end
end
context "when reloading a document" do
let!(:band_id) do
Band.create.id
end
context 'when query cache is disabled' do
before do
Mongoid::QueryCache.enabled = false
end
it "queries again" do
band = Band.find(band_id)
expect_query(1) do
band.reload
end
end
end
context 'when query cache is enabled' do
it "queries again" do
band = Band.find(band_id)
expect_query(1) do
band.reload
end
end
end
end
context "when querying collection smaller than the batch size" do
before do
99.times { Band.create! }
end
it "returns the right number of records" do
expect(Band.all.to_a.length).to eq(99)
end
it "#pluck returns the same count of objects" do
expect(Band.pluck(:name).length).to eq(99)
end
context "when loading all the documents" do
before do
Band.all.to_a
end
it "caches the complete result of the query" do
expect_no_queries do
expect(Band.all.to_a.length).to eq(99)
end
end
it "returns the same count of objects when using #pluck" do
expect(Band.pluck(:name).length).to eq(99)
end
end
end
context "when inserting an index" do
it "does not cache the query" do
expect(Mongoid::QueryCache).to receive(:cache_table).never
Band.collection.indexes.create_one(name: 1)
end
end
context 'when the initial query does not exhaust the results' do
before do
Mongoid::QueryCache.enabled = true
10.times { Band.create! }
Band.batch_size(4).all.any?
end
it 'does not cache the result' do
expect(Band.all.map(&:id).size).to eq(10)
end
context 'when a batch size smaller than the result set is specified' do
let(:batch_size) do
4
end
it 'does not cache the result' do
expect(Band.batch_size(batch_size).all.map(&:id).size).to eq(10)
end
end
end
end
| 1 | 12,831 | Can these also assert at least the length of arrays returned and ideally contents? | mongodb-mongoid | rb |
@@ -822,7 +822,7 @@ class Scheduler(object):
task.family = family
if not getattr(task, 'module', None):
task.module = module
- if not task.param_visibilities:
+ if not getattr(task, 'param_visibilities', None):
task.param_visibilities = _get_default(param_visibilities, {})
if not task.params:
task.set_params(params) | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
import inspect
import json
from luigi.batch_notifier import BatchNotifier
try:
import cPickle as pickle
except ImportError:
import pickle
import functools
import hashlib
import itertools
import logging
import os
import re
import time
import uuid
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN, \
BATCH_RUNNING
from luigi.task import Config
from luigi.parameter import ParameterVisibility
logger = logging.getLogger(__name__)
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
BATCH_RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
WORKER_STATE_DISABLED = 'disabled'
WORKER_STATE_ACTIVE = 'active'
TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]')
RPC_METHODS = {}
_retry_policy_fields = [
"retry_count",
"disable_hard_timeout",
"disable_window",
]
RetryPolicy = collections.namedtuple("RetryPolicy", _retry_policy_fields)
def _get_empty_retry_policy():
return RetryPolicy(*[None] * len(_retry_policy_fields))
def rpc_method(**request_args):
def _rpc_method(fn):
# If request args are passed, return this function again for use as
# the decorator function with the request args attached.
fn_args = inspect.getargspec(fn)
assert not fn_args.varargs
assert fn_args.args[0] == 'self'
all_args = fn_args.args[1:]
defaults = dict(zip(reversed(all_args), reversed(fn_args.defaults or ())))
required_args = frozenset(arg for arg in all_args if arg not in defaults)
fn_name = fn.__name__
@functools.wraps(fn)
def rpc_func(self, *args, **kwargs):
actual_args = defaults.copy()
actual_args.update(dict(zip(all_args, args)))
actual_args.update(kwargs)
if not all(arg in actual_args for arg in required_args):
raise TypeError('{} takes {} arguments ({} given)'.format(
fn_name, len(all_args), len(actual_args)))
return self._request('/api/{}'.format(fn_name), actual_args, **request_args)
RPC_METHODS[fn_name] = rpc_func
return fn
return _rpc_method
class scheduler(Config):
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
batch_emails = parameter.BoolParameter(default=False, description="Send e-mails in batches rather than immediately")
# Jobs are disabled if we see more than retry_count failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600)
retry_count = parameter.IntParameter(default=999999999)
disable_hard_timeout = parameter.IntParameter(default=999999999)
disable_persist = parameter.IntParameter(default=86400)
max_shown_tasks = parameter.IntParameter(default=100000)
max_graph_nodes = parameter.IntParameter(default=100000)
record_task_history = parameter.BoolParameter(default=False)
prune_on_get_work = parameter.BoolParameter(default=False)
pause_enabled = parameter.BoolParameter(default=True)
send_messages = parameter.BoolParameter(default=True)
def _get_retry_policy(self):
return RetryPolicy(self.retry_count, self.disable_hard_timeout, self.disable_window)
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and self.failures[0] < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
def _get_default(x, default):
if x is not None:
return x
else:
return default
class OrderedSet(collections.MutableSet):
"""
Standard Python OrderedSet recipe found at http://code.activestate.com/recipes/576694/
Modified to include a peek function to get the last element
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def peek(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
return key
def pop(self, last=True):
key = self.peek(last)
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, param_visibilities=None, accepts_messages=False, tracking_url=None, status_message=None,
progress_percentage=None, retry_policy='notoptional'):
self.id = task_id
self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
self.workers = OrderedSet() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.updated = self.time
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.param_visibilities = _get_default(param_visibilities, {})
self.params = {}
self.public_params = {}
self.hidden_params = {}
self.set_params(params)
self.accepts_messages = accepts_messages
self.retry_policy = retry_policy
self.failures = Failures(self.retry_policy.disable_window)
self.tracking_url = tracking_url
self.status_message = status_message
self.progress_percentage = progress_percentage
self.scheduler_message_responses = {}
self.scheduler_disable_time = None
self.runnable = False
self.batchable = False
self.batch_id = None
def __repr__(self):
return "Task(%r)" % vars(self)
def set_params(self, params):
self.params = _get_default(params, {})
self.public_params = {key: value for key, value in self.params.items() if
self.param_visibilities.get(key, ParameterVisibility.PUBLIC) == ParameterVisibility.PUBLIC}
self.hidden_params = {key: value for key, value in self.params.items() if
self.param_visibilities.get(key, ParameterVisibility.PUBLIC) == ParameterVisibility.HIDDEN}
# TODO(2017-08-10) replace this function with direct calls to batchable
# this only exists for backward compatibility
def is_batchable(self):
try:
return self.batchable
except AttributeError:
return False
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
if self.failures.first_failure_time is not None:
if (time.time() >= self.failures.first_failure_time + self.retry_policy.disable_hard_timeout):
return True
logger.debug('%s task num failures is %s and limit is %s', self.id, self.failures.num_failures(), self.retry_policy.retry_count)
if self.failures.num_failures() >= self.retry_policy.retry_count:
logger.debug('%s task num failures limit(%s) is exceeded', self.id, self.retry_policy.retry_count)
return True
return False
@property
def pretty_id(self):
param_str = ', '.join(u'{}={}'.format(key, value) for key, value in sorted(self.public_params.items()))
return u'{}({})'.format(self.family, param_str)
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active or time.time() # seconds since epoch
self.last_get_work = None
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
self.disabled = False
self.rpc_messages = []
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference, get_work=False):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
if get_work:
self.last_get_work = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_tasks(self, state, *statuses):
num_self_tasks = len(self.tasks)
num_state_tasks = sum(len(state._status_tasks[status]) for status in statuses)
if num_self_tasks < num_state_tasks:
return six.moves.filter(lambda task: task.status in statuses, self.tasks)
else:
return six.moves.filter(lambda task: self.id in task.workers, state.get_active_tasks_by_status(*statuses))
def is_trivial_worker(self, state):
"""
If it's not an assistant having only tasks that are without
requirements.
We have to pass the state parameter for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_tasks(state, PENDING))
@property
def assistant(self):
return self.info.get('assistant', False)
@property
def enabled(self):
return not self.disabled
@property
def state(self):
if self.enabled:
return WORKER_STATE_ACTIVE
else:
return WORKER_STATE_DISABLED
def add_rpc_message(self, name, **kwargs):
# the message has the format {'name': <function_name>, 'kwargs': <function_kwargs>}
self.rpc_messages.append({'name': name, 'kwargs': kwargs})
def fetch_rpc_messages(self):
messages = self.rpc_messages[:]
del self.rpc_messages[:]
return messages
def __str__(self):
return self.id
class SimpleTaskState(object):
"""
Keep track of the current state and handle persistance.
The point of this class is to enable other ways to keep state, eg. by using a database
These will be implemented by creating an abstract base class that this and other classes
inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
self._task_batchers = {}
def get_state(self):
return self._tasks, self._active_workers, self._task_batchers
def set_state(self, state):
self._tasks, self._active_workers = state[:2]
if len(state) >= 3:
self._task_batchers = state[2]
def dump(self):
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(self.get_state(), fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
# prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from empty state.")
return
self.set_state(state)
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
else:
logger.info("No prior state file exists at %s. Starting with empty state", self._state_path)
def get_active_tasks(self):
return six.itervalues(self._tasks)
def get_active_tasks_by_status(self, *statuses):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in statuses)
def get_active_task_count_for_status(self, status):
if status:
return len(self._status_tasks[status])
else:
return len(self._tasks)
def get_batch_running_tasks(self, batch_id):
assert batch_id is not None
return [
task for task in self.get_active_tasks_by_status(BATCH_RUNNING)
if task.batch_id == batch_id
]
def set_batcher(self, worker_id, family, batcher_args, max_batch_size):
self._task_batchers.setdefault(worker_id, {})
self._task_batchers[worker_id][family] = (batcher_args, max_batch_size)
def get_batcher(self, worker_id, family):
return self._task_batchers.get(worker_id, {}).get(family, (None, 1))
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_batch_running(self, task, batch_id, worker_id):
    """Flip ``task`` into BATCH_RUNNING state, claimed by ``worker_id``."""
    self.set_status(task, BATCH_RUNNING)
    task.worker_running = worker_id
    task.batch_id = batch_id
    task.time_running = time.time()
    task.resources_running = task.resources
def set_status(self, task, new_status, config=None):
    # Transition ``task`` to ``new_status`` while enforcing scheduler
    # invariants. ``config`` is required for FAILED transitions (it drives
    # the retry / auto-disable policy).
    if new_status == FAILED:
        assert config is not None
    if new_status == DISABLED and task.status in (RUNNING, BATCH_RUNNING):
        # A running task cannot be disabled out from under its worker.
        return
    # Non-batchable members of a batch are scheduled for removal on failure.
    remove_on_failure = task.batch_id is not None and not task.batchable
    if task.status == DISABLED:
        if new_status == DONE:
            self.re_enable(task)
        # don't allow workers to override a scheduler disable
        elif task.scheduler_disable_time is not None and new_status != DISABLED:
            return
    if task.status == RUNNING and task.batch_id is not None and new_status != RUNNING:
        # Leaving RUNNING dissolves the batch: propagate the transition to
        # every other member and detach them from the batch id.
        for batch_task in self.get_batch_running_tasks(task.batch_id):
            self.set_status(batch_task, new_status, config)
            batch_task.batch_id = None
        task.batch_id = None
    if new_status == FAILED and task.status != DISABLED:
        task.add_failure()
        if task.has_excessive_failures():
            # Too many failures within the window: disable instead, and
            # notify (immediately unless emails are being batched).
            task.scheduler_disable_time = time.time()
            new_status = DISABLED
            if not config.batch_emails:
                notifications.send_error_email(
                    'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
                    '{task} failed {failures} times in the last {window} seconds, so it is being '
                    'disabled for {persist} seconds'.format(
                        failures=task.retry_policy.retry_count,
                        task=task.id,
                        window=config.disable_window,
                        persist=config.disable_persist,
                    ))
    elif new_status == DISABLED:
        # Manual disable (e.g. via the UI): no auto re-enable timer.
        task.scheduler_disable_time = None
    if new_status != task.status:
        # Keep the status -> task index in sync with the task's own field.
        self._status_tasks[task.status].pop(task.id)
        self._status_tasks[new_status][task.id] = task
        task.status = new_status
        task.updated = time.time()
    if new_status == FAILED:
        task.retry = time.time() + config.retry_delay
        if remove_on_failure:
            task.remove = time.time()
def fail_dead_worker_task(self, task, config, assistants):
    # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic
    # (a worker counts as connected if it is a stakeholder or an assistant).
    if task.status in (BATCH_RUNNING, RUNNING) and task.worker_running and task.worker_running not in task.stakeholders | assistants:
        logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
                    "FAILED with retry delay of %rs", task.id, task.worker_running,
                    config.retry_delay)
        task.worker_running = None
        self.set_status(task, FAILED, config)
        task.retry = time.time() + config.retry_delay
def update_status(self, task, config):
    # Periodic maintenance for a single task: schedule removal of orphaned
    # tasks, lift expired disables, and requeue timed-out failures.
    # Mark tasks with no remaining active stakeholders for deletion
    if (not task.stakeholders) and (task.remove is None) and (task.status != RUNNING):
        # We don't check for the RUNNING case, because that is already handled
        # by the fail_dead_worker_task function.
        logger.debug("Task %r has no stakeholders anymore -> might remove "
                     "task in %s seconds", task.id, config.remove_delay)
        task.remove = time.time() + config.remove_delay
    # Re-enable task after the disable time expires
    if task.status == DISABLED and task.scheduler_disable_time is not None:
        if time.time() - task.scheduler_disable_time > config.disable_persist:
            self.re_enable(task, config)
    # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
    if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
        self.set_status(task, PENDING, config)
def may_prune(self, task):
    """True once the task's removal time has been set and has passed."""
    if not task.remove:
        return task.remove
    return time.time() >= task.remove
def inactivate_tasks(self, delete_tasks):
    """Drop the given task ids from the active set and the status index."""
    # We used to "delete" tasks when they became inactive, but with
    # pluggable state storage you might want to keep history of older
    # tasks, hence "inactivate" (as in the verb).
    for task_id in delete_tasks:
        dropped = self._tasks.pop(task_id)
        self._status_tasks[dropped.status].pop(task_id)
def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
    """Yield active workers, optionally filtered.

    last_active_lt: only yield workers whose last_active is strictly older.
    last_get_work_gt: only yield workers whose last_get_work is strictly newer.
    """
    for worker in six.itervalues(self._active_workers):
        recently_active = last_active_lt is not None and worker.last_active >= last_active_lt
        if recently_active:
            continue
        if last_get_work_gt is not None:
            last_get_work = worker.last_get_work
            if last_get_work is None or last_get_work <= last_get_work_gt:
                continue
        yield worker
def get_assistants(self, last_active_lt=None):
    """Filter the active workers down to assistants."""
    def is_assistant(w):
        return w.assistant
    return filter(is_assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
    """Return the ids of all known workers (only used for unit tests)."""
    return self._active_workers.keys()
def get_worker(self, worker_id):
    """Fetch (or lazily register) the worker record for ``worker_id``."""
    fresh = Worker(worker_id)
    return self._active_workers.setdefault(worker_id, fresh)
def inactivate_workers(self, delete_workers):
    """Remove the given workers and detach them from all tasks."""
    # Mark workers as inactive
    for worker_id in delete_workers:
        self._active_workers.pop(worker_id)
    self._remove_workers_from_tasks(delete_workers)
def _remove_workers_from_tasks(self, workers, remove_stakeholders=True):
for task in self.get_active_tasks():
if remove_stakeholders:
task.stakeholders.difference_update(workers)
task.workers -= workers
def disable_workers(self, worker_ids):
    """Mark workers as disabled and drop their task assignments."""
    # Keep them as stakeholders so their tasks are not pruned immediately.
    self._remove_workers_from_tasks(worker_ids, remove_stakeholders=False)
    for worker_id in worker_ids:
        disabled_worker = self.get_worker(worker_id)
        disabled_worker.disabled = True
        disabled_worker.tasks.clear()
class Scheduler(object):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
    """
    Keyword Arguments:
    :param config: an object of class "scheduler" or None (in which the global instance will be used)
    :param resources: a dict of str->int constraints
    :param task_history_impl: ignore config and use this object as the task history
    """
    self._config = config or scheduler(**kwargs)
    self._state = SimpleTaskState(self._config.state_path)
    if task_history_impl:
        self._task_history = task_history_impl
    elif self._config.record_task_history:
        from luigi import db_task_history  # Needs sqlalchemy, thus imported here
        self._task_history = db_task_history.DbTaskHistory()
    else:
        # No-op history when recording is disabled.
        self._task_history = history.NopHistory()
    self._resources = resources or configuration.get_config().getintdict('resources')  # TODO: Can we make this a Parameter?
    # Factory that bakes the configured default retry policy into new tasks.
    self._make_task = functools.partial(Task, retry_policy=self._config._get_retry_policy())
    self._worker_requests = {}
    # Scheduler-wide pause flag; toggled via the pause()/unpause() RPCs.
    self._paused = False
    if self._config.batch_emails:
        self._email_batcher = BatchNotifier()
def load(self):
    """Load persisted scheduler state from disk."""
    self._state.load()
def dump(self):
    """Persist scheduler state; also flush any batched failure emails."""
    self._state.dump()
    if self._config.batch_emails:
        self._email_batcher.send_email()
@rpc_method()
def prune(self):
    # Periodic cleanup: time out dead workers, fail their tasks, remove
    # tasks with no stakeholders, and advance the batched-email window.
    logger.debug("Starting pruning of task graph")
    self._prune_workers()
    self._prune_tasks()
    self._prune_emails()
    logger.debug("Done pruning task graph")
def _prune_workers(self):
    """Deactivate workers that have not checked in recently enough."""
    timed_out = []
    for worker in self._state.get_active_workers():
        if worker.prune(self._config):
            logger.debug("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
            timed_out.append(worker.id)
    self._state.inactivate_workers(timed_out)
def _prune_tasks(self):
    """Fail dead-worker tasks, refresh statuses, and remove prunable tasks."""
    assistant_ids = {w.id for w in self._state.get_assistants()}
    prunable = []
    for task in self._state.get_active_tasks():
        self._state.fail_dead_worker_task(task, self._config, assistant_ids)
        self._state.update_status(task, self._config)
        if self._state.may_prune(task):
            logger.info("Removing task %r", task.id)
            prunable.append(task.id)
    self._state.inactivate_tasks(prunable)
def _prune_emails(self):
if self._config.batch_emails:
self._email_batcher.update()
def _update_worker(self, worker_id, worker_reference=None, get_work=False):
# Keep track of whenever the worker was last active.
# For convenience also return the worker object.
worker = self._state.get_worker(worker_id)
worker.update(worker_reference, get_work=get_work)
return worker
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
@rpc_method()
def add_task_batcher(self, worker, task_family, batched_args, max_batch_size=float('inf')):
    """RPC: register a batcher for ``task_family`` on behalf of ``worker``."""
    self._state.set_batcher(worker, task_family, batched_args, max_batch_size)
@rpc_method()
def forgive_failures(self, task_id=None):
    # RPC: reset a FAILED task back to PENDING without clearing its failure
    # history ("forgive but do not forget").
    status = PENDING
    task = self._state.get_task(task_id)
    if task is None:
        return {"task_id": task_id, "status": None}
    # we forgive only failures
    if task.status == FAILED:
        # forgive but do not forget
        self._update_task_history(task, status)
        self._state.set_status(task, status, self._config)
    return {"task_id": task_id, "status": task.status}
@rpc_method()
def add_task(self, task_id=None, status=PENDING, runnable=True,
             deps=None, new_deps=None, expl=None, resources=None,
             priority=0, family='', module=None, params=None, param_visibilities=None, accepts_messages=False,
             assistant=False, tracking_url=None, worker=None, batchable=None,
             batch_id=None, retry_policy_dict=None, owners=None, **kwargs):
    """
    * add task identified by task_id if it doesn't exist
    * if deps is not None, update dependency list
    * update status of task
    * add additional workers/stakeholders
    * update priority when needed
    """
    assert worker is not None
    worker_id = worker
    worker = self._update_worker(worker_id)
    resources = {} if resources is None else resources.copy()
    if retry_policy_dict is None:
        retry_policy_dict = {}
    retry_policy = self._generate_retry_policy(retry_policy_dict)
    if worker.enabled:
        _default_task = self._make_task(
            task_id=task_id, status=PENDING, deps=deps, resources=resources,
            priority=priority, family=family, module=module, params=params, param_visibilities=param_visibilities,
        )
    else:
        _default_task = None
    task = self._state.get_task(task_id, setdefault=_default_task)
    if task is None or (task.status != RUNNING and not worker.enabled):
        # Unknown task reported by a disabled worker: nothing to record.
        return
    # for setting priority, we'll sometimes create tasks with unset family and params
    if not task.family:
        task.family = family
    if not getattr(task, 'module', None):
        task.module = module
    if not task.param_visibilities:
        task.param_visibilities = _get_default(param_visibilities, {})
    if not task.params:
        task.set_params(params)
    if batch_id is not None:
        task.batch_id = batch_id
    if status == RUNNING and not task.worker_running:
        task.worker_running = worker_id
        if batch_id:
            # copy resources_running of the first batch task
            batch_tasks = self._state.get_batch_running_tasks(batch_id)
            task.resources_running = batch_tasks[0].resources_running.copy()
        task.time_running = time.time()
    if accepts_messages is not None:
        task.accepts_messages = accepts_messages
    if tracking_url is not None or task.status != RUNNING:
        task.tracking_url = tracking_url
        if task.batch_id is not None:
            # Keep the whole batch's tracking url in sync.
            for batch_task in self._state.get_batch_running_tasks(task.batch_id):
                batch_task.tracking_url = tracking_url
    if batchable is not None:
        task.batchable = batchable
    if task.remove is not None:
        task.remove = None  # unmark task for removal so it isn't removed after being added
    if expl is not None:
        task.expl = expl
        if task.batch_id is not None:
            for batch_task in self._state.get_batch_running_tasks(task.batch_id):
                batch_task.expl = expl
    task_is_not_running = task.status not in (RUNNING, BATCH_RUNNING)
    task_started_a_run = status in (DONE, FAILED, RUNNING)
    running_on_this_worker = task.worker_running == worker_id
    if task_is_not_running or (task_started_a_run and running_on_this_worker) or new_deps:
        # don't allow re-scheduling of task while it is running, it must either fail or succeed on the worker actually running it
        if status != task.status or status == PENDING:
            # Update the DB only if there was an actual change, to prevent noise.
            # We also check for status == PENDING b/c that's the default value
            # (so checking for status != task.status would lie)
            self._update_task_history(task, status)
        self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
    if status == FAILED and self._config.batch_emails:
        batched_params, _ = self._state.get_batcher(worker_id, family)
        if batched_params:
            # Strip batched parameters so emails aggregate across a batch.
            unbatched_params = {
                param: value
                for param, value in six.iteritems(task.params)
                if param not in batched_params
            }
        else:
            unbatched_params = task.params
        try:
            expl_raw = json.loads(expl)
        except ValueError:
            expl_raw = expl
        self._email_batcher.add_failure(
            task.pretty_id, task.family, unbatched_params, expl_raw, owners)
        if task.status == DISABLED:
            # The failure pushed the task over its disable threshold.
            self._email_batcher.add_disable(
                task.pretty_id, task.family, unbatched_params, owners)
    if deps is not None:
        task.deps = set(deps)
    if new_deps is not None:
        task.deps.update(new_deps)
    if resources is not None:
        task.resources = resources
    if worker.enabled and not assistant:
        task.stakeholders.add(worker_id)
        # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
        # Otherwise the task dependencies might end up being pruned if scheduling takes a long time
        for dep in task.deps or []:
            t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
            t.stakeholders.add(worker_id)
    self._update_priority(task, priority, worker_id)
    # Because some tasks (non-dynamic dependencies) are `_make_task`ed
    # before we know their retry_policy, we always set it here
    task.retry_policy = retry_policy
    if runnable and status != FAILED and worker.enabled:
        task.workers.add(worker_id)
        self._state.get_worker(worker_id).tasks.add(task)
        task.runnable = runnable
@rpc_method()
def announce_scheduling_failure(self, task_name, family, params, expl, owners, **kwargs):
    # RPC: record a scheduling-time failure (a task that could not even be
    # added) in the batched-email notifier. No-op unless batching is on.
    if not self._config.batch_emails:
        return
    worker_id = kwargs['worker']
    batched_params, _ = self._state.get_batcher(worker_id, family)
    if batched_params:
        # Strip the batched parameters so emails aggregate across a batch.
        unbatched_params = {
            param: value
            for param, value in six.iteritems(params)
            if param not in batched_params
        }
    else:
        unbatched_params = params
    self._email_batcher.add_scheduling_fail(task_name, family, unbatched_params, expl, owners)
@rpc_method()
def add_worker(self, worker, info, **kwargs):
    """RPC: attach metadata ``info`` to the given worker."""
    registered = self._state.get_worker(worker)
    registered.add_info(info)
@rpc_method()
def disable_worker(self, worker):
    """RPC: disable a single worker and unassign its tasks."""
    self._state.disable_workers({worker})
@rpc_method()
def set_worker_processes(self, worker, n):
    """RPC: ask ``worker`` to change its worker-process count to ``n``."""
    target = self._state.get_worker(worker)
    target.add_rpc_message('set_worker_processes', n=n)
@rpc_method()
def send_scheduler_message(self, worker, task, content):
    """RPC: relay a message to a task running on ``worker``; returns its id."""
    if not self._config.send_messages:
        return {"message_id": None}
    message_id = str(uuid.uuid4())
    recipient = self._state.get_worker(worker)
    recipient.add_rpc_message('dispatch_scheduler_message', task_id=task,
                              message_id=message_id, content=content)
    return {"message_id": message_id}
@rpc_method()
def add_scheduler_message_response(self, task_id, message_id, response):
    """RPC: store a worker's response to a previously sent message."""
    if not self._state.has_task(task_id):
        return
    task = self._state.get_task(task_id)
    task.scheduler_message_responses[message_id] = response
@rpc_method()
def get_scheduler_message_response(self, task_id, message_id):
    """RPC: pop and return the stored response for ``message_id`` (or None)."""
    response = None
    if self._state.has_task(task_id):
        responses = self._state.get_task(task_id).scheduler_message_responses
        response = responses.pop(message_id, None)
    return {"response": response}
@rpc_method()
def is_pause_enabled(self):
    """RPC: report whether the pause feature is enabled in config."""
    return {'enabled': self._config.pause_enabled}
@rpc_method()
def is_paused(self):
    """RPC: report whether the scheduler is currently paused."""
    return {'paused': self._paused}
@rpc_method()
def pause(self):
    """RPC: stop handing out work (honored only when pausing is enabled)."""
    if self._config.pause_enabled:
        self._paused = True
@rpc_method()
def unpause(self):
    """RPC: resume handing out work (honored only when pausing is enabled)."""
    if self._config.pause_enabled:
        self._paused = False
@rpc_method()
def update_resources(self, **resources):
    """RPC: merge the given resource limits into the scheduler's pool."""
    if self._resources is None:
        self._resources = {}
    self._resources.update(resources)
@rpc_method()
def update_resource(self, resource, amount):
    """RPC: set one resource limit; rejects non-int or negative amounts."""
    valid = isinstance(amount, int) and amount >= 0
    if not valid:
        return False
    self._resources[resource] = amount
    return True
def _generate_retry_policy(self, task_retry_policy_dict):
    """Merge task-level retry overrides onto the configured retry policy."""
    merged = self._config._get_retry_policy()._asdict()
    overrides = {k: v for k, v in six.iteritems(task_retry_policy_dict) if v is not None}
    merged.update(overrides)
    return RetryPolicy(**merged)
def _has_resources(self, needed_resources, used_resources):
    """Return True when ``needed_resources`` still fit under the limits."""
    if needed_resources is None:
        return True
    limits = self._resources or {}
    # Unknown resources default to a capacity of 1.
    return not any(
        amount + used_resources[resource] > limits.get(resource, 1)
        for resource, amount in six.iteritems(needed_resources)
    )
def _used_resources(self):
    """Tally the resources consumed by all currently RUNNING tasks."""
    used = collections.defaultdict(int)
    if self._resources is not None:
        for task in self._state.get_active_tasks_by_status(RUNNING):
            consumed = getattr(task, "resources_running", task.resources)
            if consumed:
                for resource, amount in six.iteritems(consumed):
                    used[resource] += amount
    return used
def _rank(self, task):
"""
Return worker's rank function for task scheduling.
:return:
"""
return task.priority, -task.time
def _schedulable(self, task):
    """A task is schedulable when it is PENDING and every dependency is DONE."""
    if task.status != PENDING:
        return False
    dep_tasks = (self._state.get_task(dep, default=None) for dep in task.deps)
    return all(dep is not None and dep.status == DONE for dep in dep_tasks)
def _reset_orphaned_batch_running_tasks(self, worker_id):
    # Collect the batches this worker still has RUNNING members of; any of
    # its BATCH_RUNNING tasks from other batches were never claimed since
    # the previous get_work, so put them back in the PENDING pool.
    running_batch_ids = {
        task.batch_id
        for task in self._state.get_active_tasks_by_status(RUNNING)
        if task.worker_running == worker_id
    }
    orphaned_tasks = [
        task for task in self._state.get_active_tasks_by_status(BATCH_RUNNING)
        if task.worker_running == worker_id and task.batch_id not in running_batch_ids
    ]
    for task in orphaned_tasks:
        self._state.set_status(task, PENDING)
@rpc_method()
def count_pending(self, worker):
    # RPC: summarize the given worker's workload: counts of pending /
    # uniquely-owned / last-scheduled pending tasks plus its running tasks.
    worker_id, worker = worker, self._state.get_worker(worker)
    num_pending, num_unique_pending, num_pending_last_scheduled = 0, 0, 0
    running_tasks = []
    upstream_status_table = {}  # memoizes upstream statuses across both loops
    for task in worker.get_tasks(self._state, RUNNING):
        if self._upstream_status(task.id, upstream_status_table) == UPSTREAM_DISABLED:
            continue
        # Return a list of currently running tasks to the client,
        # makes it easier to troubleshoot
        other_worker = self._state.get_worker(task.worker_running)
        if other_worker is not None:
            more_info = {'task_id': task.id, 'worker': str(other_worker)}
            more_info.update(other_worker.info)
            running_tasks.append(more_info)
    for task in worker.get_tasks(self._state, PENDING, FAILED):
        if self._upstream_status(task.id, upstream_status_table) == UPSTREAM_DISABLED:
            continue
        num_pending += 1
        num_unique_pending += int(len(task.workers) == 1)
        num_pending_last_scheduled += int(task.workers.peek(last=True) == worker_id)
    return {
        'n_pending_tasks': num_pending,
        'n_unique_pending': num_unique_pending,
        'n_pending_last_scheduled': num_pending_last_scheduled,
        'worker_state': worker.state,
        'running_tasks': running_tasks,
    }
@rpc_method(allow_null=False)
def get_work(self, host=None, assistant=False, current_tasks=None, worker=None, **kwargs):
    # TODO: remove any expired nodes
    # Algo: iterate over all nodes, find the highest priority node no dependencies and available
    # resources.
    # Resource checking looks both at currently available resources and at which resources would
    # be available if all running tasks died and we rescheduled all workers greedily. We do both
    # checks in order to prevent a worker with many low-priority tasks from starving other
    # workers with higher priority tasks that share the same resources.
    # TODO: remove tasks that can't be done, figure out if the worker has absolutely
    # nothing it can wait for
    if self._config.prune_on_get_work:
        self.prune()
    assert worker is not None
    worker_id = worker
    worker = self._update_worker(
        worker_id,
        worker_reference={'host': host},
        get_work=True)
    if not worker.enabled:
        # Disabled workers never receive work.
        reply = {'n_pending_tasks': 0,
                 'running_tasks': [],
                 'task_id': None,
                 'n_unique_pending': 0,
                 'worker_state': worker.state,
                 }
        return reply
    if assistant:
        self.add_worker(worker_id, [('assistant', assistant)])
    batched_params, unbatched_params, batched_tasks, max_batch_size = None, None, [], 1
    best_task = None
    if current_tasks is not None:
        # Prefer handing back a task already RUNNING on this worker that it
        # doesn't know about (e.g. after a worker restart).
        ct_set = set(current_tasks)
        for task in sorted(self._state.get_active_tasks_by_status(RUNNING), key=self._rank):
            if task.worker_running == worker_id and task.id not in ct_set:
                best_task = task
    if current_tasks is not None:
        # batch running tasks that weren't claimed since the last get_work go back in the pool
        self._reset_orphaned_batch_running_tasks(worker_id)
    greedy_resources = collections.defaultdict(int)
    worker = self._state.get_worker(worker_id)
    if self._paused:
        relevant_tasks = []
    elif worker.is_trivial_worker(self._state):
        # Trivial workers only consider their own tasks and skip resource
        # bookkeeping entirely.
        relevant_tasks = worker.get_tasks(self._state, PENDING, RUNNING)
        used_resources = collections.defaultdict(int)
        greedy_workers = dict()  # If there's no resources, then they can grab any task
    else:
        relevant_tasks = self._state.get_active_tasks_by_status(PENDING, RUNNING)
        used_resources = self._used_resources()
        activity_limit = time.time() - self._config.worker_disconnect_delay
        active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit)
        greedy_workers = dict((worker.id, worker.info.get('workers', 1))
                              for worker in active_workers)
    tasks = list(relevant_tasks)
    tasks.sort(key=self._rank, reverse=True)
    for task in tasks:
        if (best_task and batched_params and task.family == best_task.family and
                len(batched_tasks) < max_batch_size and task.is_batchable() and all(
                task.params.get(name) == value for name, value in unbatched_params.items()) and
                task.resources == best_task.resources and self._schedulable(task)):
            # Task is compatible with the chosen batch: fold it in.
            for name, params in batched_params.items():
                params.append(task.params.get(name))
            batched_tasks.append(task)
        if best_task:
            continue
        if task.status == RUNNING and (task.worker_running in greedy_workers):
            # Account for work already claimed by other connected workers.
            greedy_workers[task.worker_running] -= 1
            for resource, amount in six.iteritems((getattr(task, 'resources_running', task.resources) or {})):
                greedy_resources[resource] += amount
        if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
            in_workers = (assistant and task.runnable) or worker_id in task.workers
            if in_workers and self._has_resources(task.resources, used_resources):
                best_task = task
                batch_param_names, max_batch_size = self._state.get_batcher(
                    worker_id, task.family)
                if batch_param_names and task.is_batchable():
                    try:
                        batched_params = {
                            name: [task.params[name]] for name in batch_param_names
                        }
                        unbatched_params = {
                            name: value for name, value in task.params.items()
                            if name not in batched_params
                        }
                        batched_tasks.append(task)
                    except KeyError:
                        batched_params, unbatched_params = None, None
            else:
                # Task belongs to another worker: greedily reserve a worker
                # slot and its resources so we don't starve it.
                workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
                for task_worker in workers:
                    if greedy_workers.get(task_worker, 0) > 0:
                        # use up a worker
                        greedy_workers[task_worker] -= 1
                        # keep track of the resources used in greedy scheduling
                        for resource, amount in six.iteritems((task.resources or {})):
                            greedy_resources[resource] += amount
                        break
    reply = self.count_pending(worker_id)
    if len(batched_tasks) > 1:
        # Hand out the whole batch under a synthetic md5-derived batch id.
        batch_string = '|'.join(task.id for task in batched_tasks)
        batch_id = hashlib.md5(batch_string.encode('utf-8')).hexdigest()
        for task in batched_tasks:
            self._state.set_batch_running(task, batch_id, worker_id)
        combined_params = best_task.params.copy()
        combined_params.update(batched_params)
        reply['task_id'] = None
        reply['task_family'] = best_task.family
        reply['task_module'] = getattr(best_task, 'module', None)
        reply['task_params'] = combined_params
        reply['batch_id'] = batch_id
        reply['batch_task_ids'] = [task.id for task in batched_tasks]
    elif best_task:
        self._state.set_status(best_task, RUNNING, self._config)
        best_task.worker_running = worker_id
        best_task.resources_running = best_task.resources.copy()
        best_task.time_running = time.time()
        self._update_task_history(best_task, RUNNING, host=host)
        reply['task_id'] = best_task.id
        reply['task_family'] = best_task.family
        reply['task_module'] = getattr(best_task, 'module', None)
        reply['task_params'] = best_task.params
    else:
        reply['task_id'] = None
    return reply
@rpc_method(attempts=1)
def ping(self, **kwargs):
    """RPC: heartbeat from a worker; returns queued RPC messages for it."""
    worker_id = kwargs['worker']
    pinging_worker = self._update_worker(worker_id)
    return {"rpc_messages": pinging_worker.fetch_rpc_messages()}
def _upstream_status(self, task_id, upstream_status_table):
    # Compute the "upstream status" of a task (e.g. UPSTREAM_DISABLED when
    # it is blocked on a disabled dependency) via an iterative DFS with a
    # postorder aggregation step. Results are memoized in
    # ``upstream_status_table``, which callers share across invocations.
    if task_id in upstream_status_table:
        return upstream_status_table[task_id]
    elif self._state.has_task(task_id):
        task_stack = [task_id]
        while task_stack:
            dep_id = task_stack.pop()
            dep = self._state.get_task(dep_id)
            if dep:
                if dep.status == DONE:
                    continue
                if dep_id not in upstream_status_table:
                    if dep.status == PENDING and dep.deps:
                        # Push the node again below its children so it is
                        # revisited after them (postorder).
                        task_stack += [dep_id] + list(dep.deps)
                        upstream_status_table[dep_id] = ''  # will be updated postorder
                    else:
                        dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
                        upstream_status_table[dep_id] = dep_status
                elif upstream_status_table[dep_id] == '' and dep.deps:
                    # This is the postorder update step when we set the
                    # status based on the previously calculated child elements
                    status = max((upstream_status_table.get(a_task_id, '')
                                  for a_task_id in dep.deps),
                                 key=UPSTREAM_SEVERITY_KEY)
                    upstream_status_table[dep_id] = status
        # The last popped node is the root, so this is its aggregate status.
        return upstream_status_table[dep_id]
def _serialize_task(self, task_id, include_deps=True, deps=None):
    # Build the JSON-friendly dict the UI/RPC layer exposes for one task.
    # ``deps`` overrides the serialized dependency list when provided.
    task = self._state.get_task(task_id)
    ret = {
        'display_name': task.pretty_id,
        'status': task.status,
        'workers': list(task.workers),
        'worker_running': task.worker_running,
        'time_running': getattr(task, "time_running", None),
        'start_time': task.time,
        'last_updated': getattr(task, "updated", task.time),
        'params': task.public_params,
        'name': task.family,
        'priority': task.priority,
        'resources': task.resources,
        'resources_running': getattr(task, "resources_running", None),
        'tracking_url': getattr(task, "tracking_url", None),
        'status_message': getattr(task, "status_message", None),
        'progress_percentage': getattr(task, "progress_percentage", None),
    }
    if task.status == DISABLED:
        # Only scheduler-disabled tasks carry a disable timestamp and can
        # therefore be re-enabled from the UI.
        ret['re_enable_able'] = task.scheduler_disable_time is not None
    if include_deps:
        ret['deps'] = list(task.deps if deps is None else deps)
    if self._config.send_messages and task.status == RUNNING:
        ret['accepts_messages'] = task.accepts_messages
    return ret
@rpc_method()
def graph(self, **kwargs):
    """RPC: serialize the entire active task graph."""
    self.prune()
    serialized = {}
    visited = set()
    for task in self._state.get_active_tasks():
        serialized.update(self._traverse_graph(task.id, visited))
    return serialized
def _filter_done(self, task_ids):
    """Yield only the ids whose tasks are not DONE (or are unknown)."""
    for task_id in task_ids:
        candidate = self._state.get_task(task_id)
        if candidate is None or candidate.status != DONE:
            yield task_id
def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True):
    """ Returns the dependency graph rooted at task_id

    This does a breadth-first traversal to find the nodes closest to the
    root before hitting the scheduler.max_graph_nodes limit.

    :param root_task_id: the id of the graph's root
    :return: A map of task id to serialized node
    """
    if seen is None:
        seen = set()
    elif root_task_id in seen:
        return {}
    if dep_func is None:
        # Default to forward dependencies; inverse_dep_graph passes its own.
        def dep_func(t):
            return t.deps
    seen.add(root_task_id)
    serialized = {}
    queue = collections.deque([root_task_id])
    while queue:
        task_id = queue.popleft()
        task = self._state.get_task(task_id)
        if task is None or not task.family:
            logger.debug('Missing task for id [%s]', task_id)
            # NOTE : If a dependency is missing from self._state there is no way to deduce the
            # task family and parameters.
            family_match = TASK_FAMILY_RE.match(task_id)
            family = family_match.group(1) if family_match else UNKNOWN
            params = {'task_id': task_id}
            serialized[task_id] = {
                'deps': [],
                'status': UNKNOWN,
                'workers': [],
                'start_time': UNKNOWN,
                'params': params,
                'name': family,
                'display_name': task_id,
                'priority': 0,
            }
        else:
            deps = dep_func(task)
            if not include_done:
                deps = list(self._filter_done(deps))
            serialized[task_id] = self._serialize_task(task_id, deps=deps)
            for dep in sorted(deps):
                if dep not in seen:
                    seen.add(dep)
                    queue.append(dep)
        if task_id != root_task_id:
            # Only the root keeps its display_name; trims the payload.
            del serialized[task_id]['display_name']
        if len(serialized) >= self._config.max_graph_nodes:
            break
    return serialized
@rpc_method()
def dep_graph(self, task_id, include_done=True, **kwargs):
    """RPC: serialize the dependency graph rooted at ``task_id``."""
    self.prune()
    if not self._state.has_task(task_id):
        return {}
    return self._traverse_graph(task_id, include_done=include_done)
@rpc_method()
def inverse_dep_graph(self, task_id, include_done=True, **kwargs):
    """RPC: serialize the graph of tasks that depend on ``task_id``."""
    self.prune()
    if not self._state.has_task(task_id):
        return {}
    # Build a reverse-adjacency index over all active tasks.
    inverse_graph = collections.defaultdict(set)
    for task in self._state.get_active_tasks():
        for dep in task.deps:
            inverse_graph[dep].add(task.id)
    return self._traverse_graph(
        task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done)
@rpc_method()
def task_list(self, status='', upstream_status='', limit=True, search=None, max_shown_tasks=None,
              **kwargs):
    """
    Query for a subset of tasks by status.
    """
    if not search:
        # Cheap pre-check: if too many tasks match, skip serialization and
        # just report the count.
        count_limit = max_shown_tasks or self._config.max_shown_tasks
        pre_count = self._state.get_active_task_count_for_status(status)
        if limit and pre_count > count_limit:
            return {'num_tasks': -1 if upstream_status else pre_count}
    self.prune()
    result = {}
    upstream_status_table = {}  # used to memoize upstream status
    if search is None:
        def filter_func(_):
            return True
    else:
        # Every whitespace-separated term must appear in the pretty id.
        terms = search.split()

        def filter_func(t):
            return all(term in t.pretty_id for term in terms)
    tasks = self._state.get_active_tasks_by_status(status) if status else self._state.get_active_tasks()
    for task in filter(filter_func, tasks):
        if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table):
            serialized = self._serialize_task(task.id, include_deps=False)
            result[task.id] = serialized
    if limit and len(result) > (max_shown_tasks or self._config.max_shown_tasks):
        return {'num_tasks': len(result)}
    return result
def _first_task_display_name(self, worker):
task_id = worker.info.get('first_task', '')
if self._state.has_task(task_id):
return self._state.get_task(task_id).pretty_id
else:
return task_id
@rpc_method()
def worker_list(self, include_running=True, **kwargs):
    # RPC: describe all active workers, newest first, optionally with their
    # running/pending task counts attached.
    self.prune()
    workers = [
        dict(
            name=worker.id,
            last_active=worker.last_active,
            started=worker.started,
            state=worker.state,
            first_task_display_name=self._first_task_display_name(worker),
            num_unread_rpc_messages=len(worker.rpc_messages),
            **worker.info
        ) for worker in self._state.get_active_workers()]
    workers.sort(key=lambda worker: worker['started'], reverse=True)
    if include_running:
        # Index running tasks by the worker executing them.
        running = collections.defaultdict(dict)
        for task in self._state.get_active_tasks_by_status(RUNNING):
            if task.worker_running:
                running[task.worker_running][task.id] = self._serialize_task(task.id, include_deps=False)
        num_pending = collections.defaultdict(int)
        num_uniques = collections.defaultdict(int)
        for task in self._state.get_active_tasks_by_status(PENDING):
            for worker in task.workers:
                num_pending[worker] += 1
            if len(task.workers) == 1:
                # Tasks only one worker can run.
                num_uniques[list(task.workers)[0]] += 1
        for worker in workers:
            tasks = running[worker['name']]
            worker['num_running'] = len(tasks)
            worker['num_pending'] = num_pending[worker['name']]
            worker['num_uniques'] = num_uniques[worker['name']]
            worker['running'] = tasks
    return workers
@rpc_method()
def resource_list(self):
    """
    Resources usage info and their consumers (tasks).
    """
    self.prune()
    resources = [
        dict(
            name=resource,
            num_total=r_dict['total'],
            num_used=r_dict['used']
        ) for resource, r_dict in six.iteritems(self.resources())]
    if self._resources is not None:
        # Attach the running tasks that are consuming each resource.
        consumers = collections.defaultdict(dict)
        for task in self._state.get_active_tasks_by_status(RUNNING):
            if task.status == RUNNING and task.resources:
                for resource, amount in six.iteritems(task.resources):
                    consumers[resource][task.id] = self._serialize_task(task.id, include_deps=False)
        for resource in resources:
            tasks = consumers[resource['name']]
            resource['num_consumer'] = len(tasks)
            resource['running'] = tasks
    return resources
def resources(self):
    """Get total resources and available ones.

    Returns a dict mapping resource name to ``{'total': ..., 'used': ...}``.
    """
    used_resources = self._used_resources()
    ret = collections.defaultdict(dict)
    # Guard against self._resources being None: update_resources() treats
    # None as a reachable state, and iterating None would raise TypeError.
    for resource, total in six.iteritems(self._resources or {}):
        ret[resource]['total'] = total
        if resource in used_resources:
            ret[resource]['used'] = used_resources[resource]
        else:
            ret[resource]['used'] = 0
    return ret
@rpc_method()
def task_search(self, task_str, **kwargs):
    """
    Query for a subset of tasks by task_id.

    Returns ``{status: {task_id: serialized_task}}`` for every active task
    whose id contains ``task_str``.
    """
    self.prune()
    result = collections.defaultdict(dict)
    for task in self._state.get_active_tasks():
        if task_str in task.id:
            serialized = self._serialize_task(task.id, include_deps=False)
            result[task.status][task.id] = serialized
    return result
@rpc_method()
def re_enable_task(self, task_id):
    """Manually re-enable a task that the scheduler disabled; returns its serialization (or {})."""
    task = self._state.get_task(task_id)
    # Only tasks disabled *by the scheduler* (scheduler_disable_time set) are eligible.
    if not (task and task.status == DISABLED and task.scheduler_disable_time):
        return {}
    self._state.re_enable(task, self._config)
    return self._serialize_task(task_id)
@rpc_method()
def fetch_error(self, task_id, **kwargs):
    """Return the stored error explanation for *task_id* (empty string when the task is unknown)."""
    if not self._state.has_task(task_id):
        return {"taskId": task_id, "error": ""}
    task = self._state.get_task(task_id)
    # displayName is only included when the task actually exists.
    return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id}
@rpc_method()
def set_task_status_message(self, task_id, status_message):
    """Attach a human-readable status message to a task and, if it runs as a batch, to its running siblings."""
    if not self._state.has_task(task_id):
        return
    task = self._state.get_task(task_id)
    task.status_message = status_message
    if task.status == RUNNING and task.batch_id is not None:
        for sibling in self._state.get_batch_running_tasks(task.batch_id):
            sibling.status_message = status_message
@rpc_method()
def get_task_status_message(self, task_id):
    """Return the status message previously set for *task_id* ("" for unknown tasks)."""
    message = ""
    if self._state.has_task(task_id):
        message = self._state.get_task(task_id).status_message
    return {"taskId": task_id, "statusMessage": message}
@rpc_method()
def set_task_progress_percentage(self, task_id, progress_percentage):
    """Record a progress percentage on a task and, if it runs as a batch, on its running siblings."""
    if not self._state.has_task(task_id):
        return
    task = self._state.get_task(task_id)
    task.progress_percentage = progress_percentage
    if task.status == RUNNING and task.batch_id is not None:
        for sibling in self._state.get_batch_running_tasks(task.batch_id):
            sibling.progress_percentage = progress_percentage
@rpc_method()
def get_task_progress_percentage(self, task_id):
    """Return the recorded progress percentage for *task_id* (None for unknown tasks)."""
    percentage = None
    if self._state.has_task(task_id):
        percentage = self._state.get_task(task_id).progress_percentage
    return {"taskId": task_id, "progressPercentage": percentage}
@rpc_method()
def decrease_running_task_resources(self, task_id, decrease_resources):
    """Release part of the resources held by a RUNNING task (and its running batch siblings)."""
    if not self._state.has_task(task_id):
        return
    task = self._state.get_task(task_id)
    if task.status != RUNNING:
        return

    def apply_decrease(resources, decreases):
        # Clamp at zero: a task can never hold a negative resource amount.
        for resource, amount in six.iteritems(decreases):
            if amount > 0 and resource in resources:
                resources[resource] = max(0, resources[resource] - amount)

    apply_decrease(task.resources_running, decrease_resources)
    if task.batch_id is not None:
        for sibling in self._state.get_batch_running_tasks(task.batch_id):
            apply_decrease(sibling.resources_running, decrease_resources)
@rpc_method()
def get_running_task_resources(self, task_id):
    """Return the resources currently held by a running task (None when unknown)."""
    resources = None
    if self._state.has_task(task_id):
        task = self._state.get_task(task_id)
        # resources_running may be absent on tasks that never started.
        resources = getattr(task, "resources_running", None)
    return {"taskId": task_id, "resources": resources}
def _update_task_history(self, task, status, host=None):
    """Forward a task status transition to the task-history backend (best effort)."""
    try:
        if status in (DONE, FAILED):
            self._task_history.task_finished(task, status == DONE)
        elif status == PENDING:
            self._task_history.task_scheduled(task)
        elif status == RUNNING:
            self._task_history.task_started(task, host)
    except BaseException:
        # History is informational only: never let a backend failure break scheduling.
        logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
    """Read-only access to the task-history backend."""
    # Used by server.py to expose the calls
    return self._task_history
| 1 | 18,422 | `hasattr(task, 'param_visibilities')` is more concise with the same effect, but either one works. | spotify-luigi | py |
@@ -364,6 +364,12 @@ MSGS = {
"end up in having multiple values passed for the aforementioned parameter in "
"case the method is called with keyword arguments.",
),
+ "W1114": (
+ "Positional arguments are out of order",
+ "arguments-out-of-order",
+ "Attributes given to a function call are being passed in a different order "
+ "to the function's definition",
+ ),
}
# builtin sequence types in Python 2 and 3. | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 James Lingard <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2014-2018 Claudiu Popa <[email protected]>
# Copyright (c) 2014 David Shea <[email protected]>
# Copyright (c) 2014 Steven Myint <[email protected]>
# Copyright (c) 2014 Holger Peters <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Anentropic <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Rene Zhang <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016 Alexander Todorov <[email protected]>
# Copyright (c) 2016 Ashley Whetter <[email protected]>
# Copyright (c) 2016 Jürgen Hermann <[email protected]>
# Copyright (c) 2016 Jakub Wilk <[email protected]>
# Copyright (c) 2016 Filipe Brandenburger <[email protected]>
# Copyright (c) 2017-2018 hippo91 <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2017 Derek Gustafson <[email protected]>
# Copyright (c) 2017 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Nick Drozd <[email protected]>
# Copyright (c) 2018 Mike Frysinger <[email protected]>
# Copyright (c) 2018 Ben Green <[email protected]>
# Copyright (c) 2018 Konstantin <[email protected]>
# Copyright (c) 2018 Justin Li <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""try to find more bugs in the code using astroid inference capabilities
"""
import builtins
import fnmatch
import heapq
import itertools
import operator
import re
import shlex
import sys
import types
from collections.abc import Sequence
from functools import singledispatch
import astroid
import astroid.arguments
import astroid.context
import astroid.nodes
from astroid import bases, decorators, exceptions, modutils, objects
from astroid.interpreter import dunder_lookup
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
check_messages,
decorated_with,
decorated_with_property,
has_known_bases,
is_builtin_object,
is_comprehension,
is_inside_abstract_class,
is_iterable,
is_mapping,
is_super,
node_ignores_exception,
safe_infer,
supports_delitem,
supports_getitem,
supports_membership_test,
supports_setitem,
)
from pylint.interfaces import INFERENCE, IAstroidChecker
from pylint.utils import get_global_option
BUILTINS = builtins.__name__
STR_FORMAT = {"%s.str.format" % BUILTINS}
ASYNCIO_COROUTINE = "asyncio.coroutines.coroutine"
def _unflatten(iterable):
for index, elem in enumerate(iterable):
if isinstance(elem, Sequence) and not isinstance(elem, str):
for single_elem in _unflatten(elem):
yield single_elem
elif elem and not index:
# We're interested only in the first element.
yield elem
def _flatten_container(iterable):
# Flatten nested containers into a single iterable
for item in iterable:
if isinstance(item, (list, tuple, types.GeneratorType)):
yield from _flatten_container(item)
else:
yield item
def _is_owner_ignored(owner, name, ignored_classes, ignored_modules):
"""Check if the given owner should be ignored
This will verify if the owner's module is in *ignored_modules*
or the owner's module fully qualified name is in *ignored_modules*
or if the *ignored_modules* contains a pattern which catches
the fully qualified name of the module.
Also, similar checks are done for the owner itself, if its name
matches any name from the *ignored_classes* or if its qualified
name can be found in *ignored_classes*.
"""
ignored_modules = set(ignored_modules)
module_name = owner.root().name
module_qname = owner.root().qname()
if any(
module_name in ignored_modules
or module_qname in ignored_modules
or fnmatch.fnmatch(module_qname, ignore)
for ignore in ignored_modules
):
return True
ignored_classes = set(ignored_classes)
if hasattr(owner, "qname"):
qname = owner.qname()
else:
qname = ""
return any(ignore in (name, qname) for ignore in ignored_classes)
@singledispatch
def _node_names(node):
if not hasattr(node, "locals"):
return []
return node.locals.keys()
@_node_names.register(astroid.ClassDef)
@_node_names.register(astroid.Instance)
def _(node):
    """For classes and instances, also collect names from the whole ancestry."""
    own_names = itertools.chain(node.instance_attrs.keys(), node.locals.keys())
    try:
        ancestors = node.mro()[1:]
    except (NotImplementedError, TypeError):
        # Broken or old-style MRO: fall back to the plain ancestor chain.
        ancestors = node.ancestors()

    inherited = [name for klass in ancestors for name in _node_names(klass)]
    return itertools.chain(own_names, inherited)
def _string_distance(seq1, seq2):
seq2_length = len(seq2)
row = list(range(1, seq2_length + 1)) + [0]
for seq1_index, seq1_char in enumerate(seq1):
last_row = row
row = [0] * seq2_length + [seq1_index + 1]
for seq2_index, seq2_char in enumerate(seq2):
row[seq2_index] = min(
last_row[seq2_index] + 1,
row[seq2_index - 1] + 1,
last_row[seq2_index - 1] + (seq1_char != seq2_char),
)
return row[seq2_length - 1]
def _similar_names(owner, attrname, distance_threshold, max_choices):
    """Given an owner and a name, try to find similar names

    The similar names are searched given a distance metric and only
    a given number of choices will be returned.
    """
    candidates = []
    for candidate in _node_names(owner):
        if candidate == attrname:
            continue
        distance = _string_distance(attrname, candidate)
        if distance <= distance_threshold:
            candidates.append((candidate, distance))

    # Keep only the closest matches, capped at *max_choices*, sorted by name.
    closest = heapq.nsmallest(max_choices, candidates, key=operator.itemgetter(1))
    return sorted(candidate for candidate, _ in closest)
def _missing_member_hint(owner, attrname, distance_threshold, max_choices):
    """Build the "; maybe 'x'?" suffix for a no-member message ("" when no candidate)."""
    suggestions = _similar_names(owner, attrname, distance_threshold, max_choices)
    if not suggestions:
        # No similar name.
        return ""

    quoted = [repr(suggestion) for suggestion in suggestions]
    if len(quoted) == 1:
        formatted = quoted[0]
    else:
        formatted = "one of {} or {}".format(", ".join(quoted[:-1]), quoted[-1])
    return "; maybe {}?".format(formatted)
MSGS = {
"E1101": (
"%s %r has no %r member%s",
"no-member",
"Used when a variable is accessed for an unexistent member.",
{"old_names": [("E1103", "maybe-no-member")]},
),
"I1101": (
"%s %r has no %r member%s, but source is unavailable. Consider "
"adding this module to extension-pkg-whitelist if you want "
"to perform analysis based on run-time introspection of living objects.",
"c-extension-no-member",
"Used when a variable is accessed for non-existent member of C "
"extension. Due to unavailability of source static analysis is impossible, "
"but it may be performed by introspecting living objects in run-time.",
),
"E1102": (
"%s is not callable",
"not-callable",
"Used when an object being called has been inferred to a non "
"callable object.",
),
"E1111": (
"Assigning result of a function call, where the function has no return",
"assignment-from-no-return",
"Used when an assignment is done on a function call but the "
"inferred function doesn't return anything.",
),
"E1120": (
"No value for argument %s in %s call",
"no-value-for-parameter",
"Used when a function call passes too few arguments.",
),
"E1121": (
"Too many positional arguments for %s call",
"too-many-function-args",
"Used when a function call passes too many positional arguments.",
),
"E1123": (
"Unexpected keyword argument %r in %s call",
"unexpected-keyword-arg",
"Used when a function call passes a keyword argument that "
"doesn't correspond to one of the function's parameter names.",
),
"E1124": (
"Argument %r passed by position and keyword in %s call",
"redundant-keyword-arg",
"Used when a function call would result in assigning multiple "
"values to a function parameter, one value from a positional "
"argument and one from a keyword argument.",
),
"E1125": (
"Missing mandatory keyword argument %r in %s call",
"missing-kwoa",
(
"Used when a function call does not pass a mandatory"
" keyword-only argument."
),
),
"E1126": (
"Sequence index is not an int, slice, or instance with __index__",
"invalid-sequence-index",
"Used when a sequence type is indexed with an invalid type. "
"Valid types are ints, slices, and objects with an __index__ "
"method.",
),
"E1127": (
"Slice index is not an int, None, or instance with __index__",
"invalid-slice-index",
"Used when a slice index is not an integer, None, or an object "
"with an __index__ method.",
),
"E1128": (
"Assigning result of a function call, where the function returns None",
"assignment-from-none",
"Used when an assignment is done on a function call but the "
"inferred function returns nothing but None.",
{"old_names": [("W1111", "assignment-from-none")]},
),
"E1129": (
"Context manager '%s' doesn't implement __enter__ and __exit__.",
"not-context-manager",
"Used when an instance in a with statement doesn't implement "
"the context manager protocol(__enter__/__exit__).",
),
"E1130": (
"%s",
"invalid-unary-operand-type",
"Emitted when a unary operand is used on an object which does not "
"support this type of operation.",
),
"E1131": (
"%s",
"unsupported-binary-operation",
"Emitted when a binary arithmetic operation between two "
"operands is not supported.",
),
"E1132": (
"Got multiple values for keyword argument %r in function call",
"repeated-keyword",
"Emitted when a function call got multiple values for a keyword.",
),
"E1135": (
"Value '%s' doesn't support membership test",
"unsupported-membership-test",
"Emitted when an instance in membership test expression doesn't "
"implement membership protocol (__contains__/__iter__/__getitem__).",
),
"E1136": (
"Value '%s' is unsubscriptable",
"unsubscriptable-object",
"Emitted when a subscripted value doesn't support subscription "
"(i.e. doesn't define __getitem__ method or __class_getitem__ for a class).",
),
"E1137": (
"%r does not support item assignment",
"unsupported-assignment-operation",
"Emitted when an object does not support item assignment "
"(i.e. doesn't define __setitem__ method).",
),
"E1138": (
"%r does not support item deletion",
"unsupported-delete-operation",
"Emitted when an object does not support item deletion "
"(i.e. doesn't define __delitem__ method).",
),
"E1139": (
"Invalid metaclass %r used",
"invalid-metaclass",
"Emitted whenever we can detect that a class is using, "
"as a metaclass, something which might be invalid for using as "
"a metaclass.",
),
"E1140": (
"Dict key is unhashable",
"unhashable-dict-key",
"Emitted when a dict key is not hashable "
"(i.e. doesn't define __hash__ method).",
),
"E1141": (
"Unpacking a dictionary in iteration without calling .items()",
"dict-iter-missing-items",
"Emitted when trying to iterate through a dict without calling .items()",
),
"W1113": (
"Keyword argument before variable positional arguments list "
"in the definition of %s function",
"keyword-arg-before-vararg",
"When defining a keyword argument before variable positional arguments, one can "
"end up in having multiple values passed for the aforementioned parameter in "
"case the method is called with keyword arguments.",
),
}
# builtin sequence types in Python 2 and 3.
SEQUENCE_TYPES = {
"str",
"unicode",
"list",
"tuple",
"bytearray",
"xrange",
"range",
"bytes",
"memoryview",
}
def _emit_no_member(node, owner, owner_name, ignored_mixins=True, ignored_none=True):
    """Try to see if no-member should be emitted for the given owner.

    The following cases are ignored:

        * the owner is a function and it has decorators.
        * the owner is an instance and it has __getattr__, __getattribute__ implemented
        * the module is explicitly ignored from no-member checks
        * the owner is a class and the name can be found in its metaclass.
        * The access node is protected by an except handler, which handles
          AttributeError, Exception or bare except.

    :returns: True when the no-member message should be emitted.
    """
    # pylint: disable=too-many-return-statements
    # Access is guarded by a try/except that would catch AttributeError.
    if node_ignores_exception(node, AttributeError):
        return False
    if ignored_none and isinstance(owner, astroid.Const) and owner.value is None:
        return False
    if is_super(owner) or getattr(owner, "type", None) == "metaclass":
        return False
    # Mixins are detected purely by naming convention (suffix "mixin").
    if ignored_mixins and owner_name[-5:].lower() == "mixin":
        return False
    if isinstance(owner, astroid.FunctionDef) and owner.decorators:
        return False
    if isinstance(owner, (astroid.Instance, astroid.ClassDef)):
        if owner.has_dynamic_getattr():
            # Issue #2565: Don't ignore enums, as they have a `__getattr__` but it's not
            # invoked at this point.
            try:
                metaclass = owner.metaclass()
            except exceptions.MroError:
                return False
            if metaclass:
                return metaclass.qname() == "enum.EnumMeta"
            return False
        if not has_known_bases(owner):
            return False

        # Exclude typed annotations, since these might actually exist
        # at some point during the runtime of the program.
        attribute = owner.locals.get(node.attrname, [None])[0]
        if (
            attribute
            and isinstance(attribute, astroid.AssignName)
            and isinstance(attribute.parent, astroid.AnnAssign)
        ):
            return False
    if isinstance(owner, objects.Super):
        # Verify if we are dealing with an invalid Super object.
        # If it is invalid, then there's no point in checking that
        # it has the required attribute. Also, don't fail if the
        # MRO is invalid.
        try:
            owner.super_mro()
        except (exceptions.MroError, exceptions.SuperError):
            return False
        if not all(map(has_known_bases, owner.type.mro())):
            return False
    if isinstance(owner, astroid.Module):
        # A module-level __getattr__ can resolve anything dynamically.
        try:
            owner.getattr("__getattr__")
            return False
        except astroid.NotFoundError:
            pass
    if node.attrname.startswith("_" + owner_name):
        # Test if an attribute has been mangled ('private' attribute)
        unmangled_name = node.attrname.split("_" + owner_name)[-1]
        try:
            if owner.getattr(unmangled_name, context=None) is not None:
                return False
        except astroid.NotFoundError:
            return True
    return True
def _determine_callable(callable_obj):
    """Resolve *callable_obj* to ``(function, implicit_parameter_count, description)``.

    Raises ValueError when the object cannot be resolved to a callable
    with an inspectable signature.
    """
    # Ordering is important, since BoundMethod is a subclass of UnboundMethod,
    # and Function inherits Lambda.
    parameters = 0
    if hasattr(callable_obj, "implicit_parameters"):
        parameters = callable_obj.implicit_parameters()
    if isinstance(callable_obj, astroid.BoundMethod):
        # Bound methods have an extra implicit 'self' argument.
        return callable_obj, parameters, callable_obj.type
    if isinstance(callable_obj, astroid.UnboundMethod):
        return callable_obj, parameters, "unbound method"
    if isinstance(callable_obj, astroid.FunctionDef):
        return callable_obj, parameters, callable_obj.type
    if isinstance(callable_obj, astroid.Lambda):
        return callable_obj, parameters, "lambda"
    if isinstance(callable_obj, astroid.ClassDef):
        # Class instantiation, lookup __new__ instead.
        # If we only find object.__new__, we can safely check __init__
        # instead. If __new__ belongs to builtins, then we look
        # again for __init__ in the locals, since we won't have
        # argument information for the builtin __new__ function.
        try:
            # Use the last definition of __new__.
            new = callable_obj.local_attr("__new__")[-1]
        except exceptions.NotFoundError:
            new = None

        from_object = new and new.parent.scope().name == "object"
        from_builtins = new and new.root().name in sys.builtin_module_names

        if not new or from_object or from_builtins:
            try:
                # Use the last definition of __init__.
                callable_obj = callable_obj.local_attr("__init__")[-1]
            except exceptions.NotFoundError:
                # do nothing, covered by no-init.
                raise ValueError
        else:
            callable_obj = new

        if not isinstance(callable_obj, astroid.FunctionDef):
            raise ValueError
        # both have an extra implicit 'cls'/'self' argument.
        return callable_obj, parameters, "constructor"

    raise ValueError
def _has_parent_of_type(node, node_type, statement):
"""Check if the given node has a parent of the given type."""
parent = node.parent
while not isinstance(parent, node_type) and statement.parent_of(parent):
parent = parent.parent
return isinstance(parent, node_type)
def _is_name_used_as_variadic(name, variadics):
"""Check if the given name is used as a variadic argument."""
return any(
variadic.value == name or variadic.value.parent_of(name)
for variadic in variadics
)
def _no_context_variadic_keywords(node):
    """Detect a ``**kwargs`` forwarded through a call without inference context."""
    stmt = node.statement()
    enclosing_scope = node.scope()
    if not isinstance(enclosing_scope, astroid.FunctionDef):
        return False

    forwarded = ()
    if isinstance(stmt, (astroid.Return, astroid.Expr)) and isinstance(
        stmt.value, astroid.Call
    ):
        inner_call = stmt.value
        forwarded = list(inner_call.keywords or []) + inner_call.kwargs
    return _no_context_variadic(
        node, enclosing_scope.args.kwarg, astroid.Keyword, forwarded
    )
def _no_context_variadic_positional(node):
    """Detect a ``*args`` forwarded through a call without inference context."""
    stmt = node.statement()
    enclosing_scope = node.scope()
    if not isinstance(enclosing_scope, astroid.FunctionDef):
        return False

    forwarded = ()
    if isinstance(stmt, (astroid.Expr, astroid.Return)) and isinstance(
        stmt.value, astroid.Call
    ):
        inner_call = stmt.value
        forwarded = inner_call.starargs + inner_call.kwargs
    return _no_context_variadic(
        node, enclosing_scope.args.vararg, astroid.Starred, forwarded
    )
def _no_context_variadic(node, variadic_name, variadic_type, variadics):
    """Verify if the given call node has variadic nodes without context

    This is a workaround for handling cases of nested call functions
    which don't have the specific call context at hand.
    Variadic arguments (variable positional arguments and variable
    keyword arguments) are inferred, inherently wrong, by astroid
    as a Tuple, respectively a Dict with empty elements.
    This can lead pylint to believe that a function call receives
    too few arguments.
    """
    statement = node.statement()
    for name in statement.nodes_of_class(astroid.Name):
        if name.name != variadic_name:
            continue

        inferred = safe_infer(name)
        if isinstance(inferred, (astroid.List, astroid.Tuple)):
            length = len(inferred.elts)
        elif isinstance(inferred, astroid.Dict):
            length = len(inferred.items)
        else:
            continue

        # An empty variadic inferred from a function signature is exactly the
        # bogus context-free inference described in the docstring above.
        inferred_statement = inferred.statement()
        if not length and isinstance(inferred_statement, astroid.FunctionDef):
            is_in_starred_context = _has_parent_of_type(node, variadic_type, statement)
            used_as_starred_argument = _is_name_used_as_variadic(name, variadics)
            if is_in_starred_context or used_as_starred_argument:
                return True
    return False
def _is_invalid_metaclass(metaclass):
    """Return True when *metaclass* cannot act as a metaclass (no ``type`` in its MRO)."""
    try:
        mro = metaclass.mro()
    except NotImplementedError:
        # Cannot have a metaclass which is not a newstyle class.
        return True
    return not any(is_builtin_object(cls) and cls.name == "type" for cls in mro)
def _infer_from_metaclass_constructor(cls, func):
    """Try to infer what the given *func* constructor is building

    :param astroid.FunctionDef func:
        A metaclass constructor. Metaclass definitions can be
        functions, which should accept three arguments, the name of
        the class, the bases of the class and the attributes.
        The function could return anything, but usually it should
        be a proper metaclass.
    :param astroid.ClassDef cls:
        The class for which the *func* parameter should generate
        a metaclass.
    :returns:
        The class generated by the function or None,
        if we couldn't infer it.
    :rtype: astroid.ClassDef
    """
    context = astroid.context.InferenceContext()

    # Build synthetic (name, bases, attrs) arguments mirroring how a
    # metaclass factory would actually be invoked for *cls*.
    class_bases = astroid.List()
    class_bases.postinit(elts=cls.bases)

    attrs = astroid.Dict()
    local_names = [(name, values[-1]) for name, values in cls.locals.items()]
    attrs.postinit(local_names)

    builder_args = astroid.Tuple()
    builder_args.postinit([cls.name, class_bases, attrs])

    context.callcontext = astroid.context.CallContext(builder_args)
    try:
        inferred = next(func.infer_call_result(func, context), None)
    except astroid.InferenceError:
        return None
    # Normalize Uninferable (falsy) to None for the caller.
    return inferred or None
def _is_c_extension(module_node):
    """A non-stdlib module whose sources are unavailable is treated as a C extension."""
    if modutils.is_standard_module(module_node.name):
        return False
    return not module_node.fully_defined()
class TypeChecker(BaseChecker):
"""try to find bugs in the code using type inference
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = "typecheck"
# messages
msgs = MSGS
priority = -1
# configuration options
options = (
(
"ignore-on-opaque-inference",
{
"default": True,
"type": "yn",
"metavar": "<y_or_n>",
"help": "This flag controls whether pylint should warn about "
"no-member and similar checks whenever an opaque object "
"is returned when inferring. The inference can return "
"multiple potential results while evaluating a Python object, "
"but some branches might not be evaluated, which results in "
"partial inference. In that case, it might be useful to still emit "
"no-member and other checks for the rest of the inferred objects.",
},
),
(
"ignore-mixin-members",
{
"default": True,
"type": "yn",
"metavar": "<y_or_n>",
"help": 'Tells whether missing members accessed in mixin \
class should be ignored. A mixin class is detected if its name ends with \
"mixin" (case insensitive).',
},
),
(
"ignore-none",
{
"default": True,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Tells whether to warn about missing members when the owner "
"of the attribute is inferred to be None.",
},
),
(
"ignored-modules",
{
"default": (),
"type": "csv",
"metavar": "<module names>",
"help": "List of module names for which member attributes "
"should not be checked (useful for modules/projects "
"where namespaces are manipulated during runtime and "
"thus existing member attributes cannot be "
"deduced by static analysis). It supports qualified "
"module names, as well as Unix pattern matching.",
},
),
# the defaults here are *stdlib* names that (almost) always
# lead to false positives, since their idiomatic use is
# 'too dynamic' for pylint to grok.
(
"ignored-classes",
{
"default": ("optparse.Values", "thread._local", "_thread._local"),
"type": "csv",
"metavar": "<members names>",
"help": "List of class names for which member attributes "
"should not be checked (useful for classes with "
"dynamically set attributes). This supports "
"the use of qualified names.",
},
),
(
"generated-members",
{
"default": (),
"type": "string",
"metavar": "<members names>",
"help": "List of members which are set dynamically and \
missed by pylint inference system, and so shouldn't trigger E1101 when \
accessed. Python regular expressions are accepted.",
},
),
(
"contextmanager-decorators",
{
"default": ["contextlib.contextmanager"],
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce context managers, "
"such as contextlib.contextmanager. Add to this list "
"to register other decorators that produce valid "
"context managers.",
},
),
(
"missing-member-hint-distance",
{
"default": 1,
"type": "int",
"metavar": "<member hint edit distance>",
"help": "The minimum edit distance a name should have in order "
"to be considered a similar match for a missing member name.",
},
),
(
"missing-member-max-choices",
{
"default": 1,
"type": "int",
"metavar": "<member hint max choices>",
"help": "The total number of similar names that should be taken in "
"consideration when showing a hint for a missing member.",
},
),
(
"missing-member-hint",
{
"default": True,
"type": "yn",
"metavar": "<missing member hint>",
"help": "Show a hint with possible names when a member name was not "
"found. The aspect of finding the hint is based on edit distance.",
},
),
(
"signature-mutators",
{
"default": [],
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that change the signature of "
"a decorated function.",
},
),
)
@decorators.cachedproperty
def _suggestion_mode(self):
    # Cached: reads the global suggestion-mode option once per checker.
    return get_global_option(self, "suggestion-mode", default=True)
def open(self):
    """Normalize the ``generated-members`` option into a tuple of patterns."""
    # do this in open since config not fully initialized in __init__
    # generated_members may contain regular expressions
    # (surrounded by quote `"` and followed by a comma `,`)
    # REQUEST,aq_parent,"[a-zA-Z]+_set{1,2}"' =>
    # ('REQUEST', 'aq_parent', '[a-zA-Z]+_set{1,2}')
    if isinstance(self.config.generated_members, str):
        gen = shlex.shlex(self.config.generated_members)
        gen.whitespace += ","
        # Keep regex metacharacters inside a single shlex token.
        gen.wordchars += r"[]-+\.*?()|"
        self.config.generated_members = tuple(tok.strip('"') for tok in gen)
@check_messages("keyword-arg-before-vararg")
def visit_functiondef(self, node):
    """Emit keyword-arg-before-vararg for ``def f(a=1, *args)``-style signatures.

    A default-valued argument placed before ``*args`` can silently receive
    multiple values when the function is called with keyword arguments.
    """
    # check for keyword arg before varargs
    if node.args.vararg and node.args.defaults:
        # Pass the message arguments as a proper 1-tuple; the previous
        # ``args=(node.name)`` was just a parenthesized string.
        self.add_message("keyword-arg-before-vararg", node=node, args=(node.name,))

visit_asyncfunctiondef = visit_functiondef
@check_messages("invalid-metaclass")
def visit_classdef(self, node):
    """Emit invalid-metaclass when the declared metaclass cannot be one."""

    def _metaclass_name(metaclass):
        if isinstance(metaclass, (astroid.ClassDef, astroid.FunctionDef)):
            return metaclass.name
        return metaclass.as_string()

    metaclass = node.declared_metaclass()
    if not metaclass:
        return

    if isinstance(metaclass, astroid.FunctionDef):
        # A callable used as metaclass: infer what it actually builds.
        metaclass = _infer_from_metaclass_constructor(node, metaclass)
        if not metaclass:
            # Don't do anything if we cannot infer the result.
            return

    # Anything that is not a class, or a class without ``type`` in its
    # MRO, cannot serve as a metaclass.
    is_invalid = not isinstance(metaclass, astroid.ClassDef) or _is_invalid_metaclass(
        metaclass
    )
    if is_invalid:
        self.add_message(
            "invalid-metaclass", node=node, args=(_metaclass_name(metaclass),)
        )
def visit_assignattr(self, node):
    # Only augmented assignments (e.g. ``obj.attr += 1``) read the attribute,
    # so they are the only assignment form that can trigger no-member checks.
    if isinstance(node.assign_type(), astroid.AugAssign):
        self.visit_attribute(node)
def visit_delattr(self, node):
    # ``del obj.attr`` accesses the attribute, so run the same member checks.
    self.visit_attribute(node)
@check_messages("no-member", "c-extension-no-member")
def visit_attribute(self, node):
    """check that the accessed attribute exists

    to avoid too much false positives for now, we'll consider the code as
    correct if a single of the inferred nodes has the accessed attribute.

    function/method, super call and metaclasses are ignored
    """
    for pattern in self.config.generated_members:
        # attribute is marked as generated, stop here
        if re.match(pattern, node.attrname):
            return
        if re.match(pattern, node.as_string()):
            return

    try:
        inferred = list(node.expr.infer())
    except exceptions.InferenceError:
        return

    # list of (node, nodename) which are missing the attribute
    missingattr = set()

    non_opaque_inference_results = [
        owner
        for owner in inferred
        if owner is not astroid.Uninferable
        and not isinstance(owner, astroid.nodes.Unknown)
    ]
    if (
        len(non_opaque_inference_results) != len(inferred)
        and self.config.ignore_on_opaque_inference
    ):
        # There is an ambiguity in the inference. Since we can't
        # make sure that we won't emit a false positive, we just stop
        # whenever the inference returns an opaque inference object.
        return

    for owner in non_opaque_inference_results:
        name = getattr(owner, "name", None)
        if _is_owner_ignored(
            owner, name, self.config.ignored_classes, self.config.ignored_modules
        ):
            continue

        try:
            if not [
                n
                for n in owner.getattr(node.attrname)
                if not isinstance(n.statement(), astroid.AugAssign)
            ]:
                missingattr.add((owner, name))
                continue
        except AttributeError:
            continue
        except exceptions.NotFoundError:
            # This can't be moved before the actual .getattr call,
            # because there can be more values inferred and we are
            # stopping after the first one which has the attribute in question.
            # The problem is that if the first one has the attribute,
            # but we continue to the next values which doesn't have the
            # attribute, then we'll have a false positive.
            # So call this only after the call has been made.
            if not _emit_no_member(
                node,
                owner,
                name,
                ignored_mixins=self.config.ignore_mixin_members,
                ignored_none=self.config.ignore_none,
            ):
                continue

            missingattr.add((owner, name))
            continue
        # stop on the first found
        break
    else:
        # NOTE: this ``else`` belongs to the ``for`` loop above: it only runs
        # when no inferred owner provided the attribute (no ``break`` hit).
        # we have not found any node with the attributes, display the
        # message for inferred nodes
        done = set()
        for owner, name in missingattr:
            if isinstance(owner, astroid.Instance):
                actual = owner._proxied
            else:
                actual = owner

            if actual in done:
                continue
            done.add(actual)

            msg, hint = self._get_nomember_msgid_hint(node, owner)
            self.add_message(
                msg,
                node=node,
                args=(owner.display_type(), name, node.attrname, hint),
                confidence=INFERENCE,
            )
def _get_nomember_msgid_hint(self, node, owner):
    """Pick the message id (no-member vs c-extension-no-member) plus its hint suffix."""
    suggestions_are_possible = self._suggestion_mode and isinstance(
        owner, astroid.Module
    )
    if suggestions_are_possible and _is_c_extension(owner):
        # Source is unavailable; suggest whitelisting instead of a name hint.
        return "c-extension-no-member", ""

    hint = ""
    if self.config.missing_member_hint:
        hint = _missing_member_hint(
            owner,
            node.attrname,
            self.config.missing_member_hint_distance,
            self.config.missing_member_max_choices,
        )
    return "no-member", hint
@check_messages("assignment-from-no-return", "assignment-from-none")
def visit_assign(self, node):
    """check that if assigning to a function call, the function is
    possibly returning something valuable
    """
    if not isinstance(node.value, astroid.Call):
        return

    function_node = safe_infer(node.value.func)
    # skip class, generator and incomplete function definition
    funcs = (astroid.FunctionDef, astroid.UnboundMethod, astroid.BoundMethod)
    if not (
        isinstance(function_node, funcs)
        and function_node.root().fully_defined()
        and not function_node.decorators
    ):
        return
    if isinstance(function_node, astroid.BoundMethod) and isinstance(
        function_node._proxied, astroid.UnboundMethod
    ):
        # Unwrap to get the actual function object
        function_node = function_node._proxied._proxied

    # Generators, abstract methods and coroutines legitimately "return nothing".
    if (
        function_node.is_generator()
        or function_node.is_abstract(pass_is_abstract=False)
        or isinstance(function_node, astroid.AsyncFunctionDef)
    ):
        return
    returns = list(
        function_node.nodes_of_class(astroid.Return, skip_klass=astroid.FunctionDef)
    )
    if not returns:
        self.add_message("assignment-from-no-return", node=node)
    else:
        for rnode in returns:
            if not (
                isinstance(rnode.value, astroid.Const)
                and rnode.value.value is None
                or rnode.value is None
            ):
                break
        else:
            # Every return statement returned None (explicitly or bare).
            self.add_message("assignment-from-none", node=node)
def _check_uninferable_call(self, node):
"""
Check that the given uninferable Call node does not
call an actual function.
"""
if not isinstance(node.func, astroid.Attribute):
return
# Look for properties. First, obtain
# the lhs of the Attribute node and search the attribute
# there. If that attribute is a property or a subclass of properties,
# then most likely it's not callable.
expr = node.func.expr
klass = safe_infer(expr)
if (
klass is None
or klass is astroid.Uninferable
or not isinstance(klass, astroid.Instance)
):
return
try:
attrs = klass._proxied.getattr(node.func.attrname)
except exceptions.NotFoundError:
return
for attr in attrs:
if attr is astroid.Uninferable:
continue
if not isinstance(attr, astroid.FunctionDef):
continue
# Decorated, see if it is decorated with a property.
# Also, check the returns and see if they are callable.
if decorated_with_property(attr):
try:
all_returns_are_callable = all(
return_node.callable() or return_node is astroid.Uninferable
for return_node in attr.infer_call_result(node)
)
except astroid.InferenceError:
continue
if not all_returns_are_callable:
self.add_message(
"not-callable", node=node, args=node.func.as_string()
)
break
    # pylint: disable=too-many-branches
    @check_messages(*(list(MSGS.keys())))
    def visit_call(self, node):
        """check that called functions/methods are inferred to callable objects,
        and that the arguments passed to the function match the parameters in
        the inferred function's definition

        Emits, among others: not-callable, too-many-function-args,
        repeated-keyword, redundant-keyword-arg, unexpected-keyword-arg,
        no-value-for-parameter and missing-kwoa.
        """
        called = safe_infer(node.func)
        # only function, generator and object defining __call__ are allowed
        # Ignore instances of descriptors since astroid cannot properly handle them
        # yet
        if called and not called.callable():
            if isinstance(called, astroid.Instance) and (
                not has_known_bases(called)
                or (
                    called.parent is not None
                    and isinstance(called.scope(), astroid.ClassDef)
                    and "__get__" in called.locals
                )
            ):
                # Don't emit if we can't make sure this object is callable.
                pass
            else:
                self.add_message("not-callable", node=node, args=node.func.as_string())
        self._check_uninferable_call(node)
        try:
            # Normalize to the underlying function, the number of implicit
            # arguments (e.g. bound `self`), and a display name.
            called, implicit_args, callable_name = _determine_callable(called)
        except ValueError:
            # Any error occurred during determining the function type, most of
            # those errors are handled by different warnings.
            return
        if called.args.args is None:
            # Built-in functions have no argument information.
            return
        if len(called.argnames()) != len(set(called.argnames())):
            # Duplicate parameter name (see duplicate-argument). We can't really
            # make sense of the function call in this case, so just return.
            return
        # Build the set of keyword arguments, checking for duplicate keywords,
        # and count the positional arguments.
        call_site = astroid.arguments.CallSite.from_call(node)
        # Warn about duplicated keyword arguments, such as `f=24, **{'f': 24}`
        for keyword in call_site.duplicated_keywords:
            self.add_message("repeated-keyword", node=node, args=(keyword,))
        if call_site.has_invalid_arguments() or call_site.has_invalid_keywords():
            # Can't make sense of this.
            return
        # Has the function signature changed in ways we cannot reliably detect?
        if hasattr(called, "decorators") and decorated_with(
            called, self.config.signature_mutators
        ):
            return
        num_positional_args = len(call_site.positional_arguments)
        keyword_args = list(call_site.keyword_arguments.keys())
        # Determine if we don't have a context for our call and we use variadics.
        if isinstance(node.scope(), astroid.FunctionDef):
            has_no_context_positional_variadic = _no_context_variadic_positional(node)
            has_no_context_keywords_variadic = _no_context_variadic_keywords(node)
        else:
            has_no_context_positional_variadic = (
                has_no_context_keywords_variadic
            ) = False
        # These are coming from the functools.partial implementation in astroid
        already_filled_positionals = getattr(called, "filled_positionals", 0)
        already_filled_keywords = getattr(called, "filled_keywords", {})
        keyword_args += list(already_filled_keywords)
        num_positional_args += implicit_args + already_filled_positionals
        # Analyze the list of formal parameters.
        args = list(itertools.chain(called.args.posonlyargs or (), called.args.args))
        num_mandatory_parameters = len(args) - len(called.args.defaults)
        # Each `parameters` entry is a mutable [(name, default), assigned] pair;
        # the second element records whether a supplied argument covered it.
        parameters = []
        parameter_name_to_index = {}
        for i, arg in enumerate(args):
            if isinstance(arg, astroid.Tuple):
                name = None
                # Don't store any parameter names within the tuple, since those
                # are not assignable from keyword arguments.
            else:
                assert isinstance(arg, astroid.AssignName)
                # This occurs with:
                # def f( (a), (b) ): pass
                name = arg.name
                parameter_name_to_index[name] = i
            if i >= num_mandatory_parameters:
                defval = called.args.defaults[i - num_mandatory_parameters]
            else:
                defval = None
            parameters.append([(name, defval), False])
        # Keyword-only parameters: name -> [default, assigned-flag].
        kwparams = {}
        for i, arg in enumerate(called.args.kwonlyargs):
            if isinstance(arg, astroid.Keyword):
                name = arg.arg
            else:
                assert isinstance(arg, astroid.AssignName)
                name = arg.name
            kwparams[name] = [called.args.kw_defaults[i], False]
        # Match the supplied arguments against the function parameters.
        # 1. Match the positional arguments.
        for i in range(num_positional_args):
            if i < len(parameters):
                parameters[i][1] = True
            elif called.args.vararg is not None:
                # The remaining positional arguments get assigned to the *args
                # parameter.
                break
            else:
                # Too many positional arguments.
                self.add_message(
                    "too-many-function-args", node=node, args=(callable_name,)
                )
                break
        # 2. Match the keyword arguments.
        for keyword in keyword_args:
            if keyword in parameter_name_to_index:
                i = parameter_name_to_index[keyword]
                if parameters[i][1]:
                    # Duplicate definition of function parameter.
                    # Might be too hardcoded, but this can actually
                    # happen when using str.format and `self` is passed
                    # by keyword argument, as in `.format(self=self)`.
                    # It's perfectly valid to so, so we're just skipping
                    # it if that's the case.
                    if not (keyword == "self" and called.qname() in STR_FORMAT):
                        self.add_message(
                            "redundant-keyword-arg",
                            node=node,
                            args=(keyword, callable_name),
                        )
                else:
                    parameters[i][1] = True
            elif keyword in kwparams:
                if kwparams[keyword][1]:
                    # Duplicate definition of function parameter.
                    self.add_message(
                        "redundant-keyword-arg",
                        node=node,
                        args=(keyword, callable_name),
                    )
                else:
                    kwparams[keyword][1] = True
            elif called.args.kwarg is not None:
                # The keyword argument gets assigned to the **kwargs parameter.
                pass
            else:
                # Unexpected keyword argument.
                self.add_message(
                    "unexpected-keyword-arg", node=node, args=(keyword, callable_name)
                )
        # 3. Match the **kwargs, if any.
        if node.kwargs:
            for i, [(name, defval), assigned] in enumerate(parameters):
                # Assume that *kwargs provides values for all remaining
                # unassigned named parameters.
                if name is not None:
                    parameters[i][1] = True
                else:
                    # **kwargs can't assign to tuples.
                    pass
        # Check that any parameters without a default have been assigned
        # values.
        for [(name, defval), assigned] in parameters:
            if (defval is None) and not assigned:
                if name is None:
                    display_name = "<tuple>"
                else:
                    display_name = repr(name)
                if not has_no_context_positional_variadic:
                    self.add_message(
                        "no-value-for-parameter",
                        node=node,
                        args=(display_name, callable_name),
                    )
        for name in kwparams:
            defval, assigned = kwparams[name]
            if defval is None and not assigned and not has_no_context_keywords_variadic:
                self.add_message("missing-kwoa", node=node, args=(name, callable_name))
@check_messages("invalid-sequence-index")
def visit_extslice(self, node):
# Check extended slice objects as if they were used as a sequence
# index to check if the object being sliced can support them
return self.visit_index(node)
@check_messages("invalid-sequence-index")
def visit_index(self, node):
if not node.parent or not hasattr(node.parent, "value"):
return None
# Look for index operations where the parent is a sequence type.
# If the types can be determined, only allow indices to be int,
# slice or instances with __index__.
parent_type = safe_infer(node.parent.value)
if not isinstance(
parent_type, (astroid.ClassDef, astroid.Instance)
) or not has_known_bases(parent_type):
return None
# Determine what method on the parent this index will use
# The parent of this node will be a Subscript, and the parent of that
# node determines if the Subscript is a get, set, or delete operation.
if node.parent.ctx is astroid.Store:
methodname = "__setitem__"
elif node.parent.ctx is astroid.Del:
methodname = "__delitem__"
else:
methodname = "__getitem__"
# Check if this instance's __getitem__, __setitem__, or __delitem__, as
# appropriate to the statement, is implemented in a builtin sequence
# type. This way we catch subclasses of sequence types but skip classes
# that override __getitem__ and which may allow non-integer indices.
try:
methods = dunder_lookup.lookup(parent_type, methodname)
if methods is astroid.Uninferable:
return None
itemmethod = methods[0]
except (
exceptions.NotFoundError,
exceptions.AttributeInferenceError,
IndexError,
):
return None
if (
not isinstance(itemmethod, astroid.FunctionDef)
or itemmethod.root().name != BUILTINS
or not itemmethod.parent
or itemmethod.parent.name not in SEQUENCE_TYPES
):
return None
# For ExtSlice objects coming from visit_extslice, no further
# inference is necessary, since if we got this far the ExtSlice
# is an error.
if isinstance(node, astroid.ExtSlice):
index_type = node
else:
index_type = safe_infer(node)
if index_type is None or index_type is astroid.Uninferable:
return None
# Constants must be of type int
if isinstance(index_type, astroid.Const):
if isinstance(index_type.value, int):
return None
# Instance values must be int, slice, or have an __index__ method
elif isinstance(index_type, astroid.Instance):
if index_type.pytype() in (BUILTINS + ".int", BUILTINS + ".slice"):
return None
try:
index_type.getattr("__index__")
return None
except exceptions.NotFoundError:
pass
elif isinstance(index_type, astroid.Slice):
# Delegate to visit_slice. A slice can be present
# here after inferring the index node, which could
# be a `slice(...)` call for instance.
return self.visit_slice(index_type)
# Anything else is an error
self.add_message("invalid-sequence-index", node=node)
return None
@check_messages("invalid-slice-index")
def visit_slice(self, node):
# Check the type of each part of the slice
invalid_slices = 0
for index in (node.lower, node.upper, node.step):
if index is None:
continue
index_type = safe_infer(index)
if index_type is None or index_type is astroid.Uninferable:
continue
# Constants must of type int or None
if isinstance(index_type, astroid.Const):
if isinstance(index_type.value, (int, type(None))):
continue
# Instance values must be of type int, None or an object
# with __index__
elif isinstance(index_type, astroid.Instance):
if index_type.pytype() in (BUILTINS + ".int", BUILTINS + ".NoneType"):
continue
try:
index_type.getattr("__index__")
return
except exceptions.NotFoundError:
pass
invalid_slices += 1
if not invalid_slices:
return
# Anything else is an error, unless the object that is indexed
# is a custom object, which knows how to handle this kind of slices
parent = node.parent
if isinstance(parent, astroid.ExtSlice):
parent = parent.parent
if isinstance(parent, astroid.Subscript):
inferred = safe_infer(parent.value)
if inferred is None or inferred is astroid.Uninferable:
# Don't know what this is
return
known_objects = (
astroid.List,
astroid.Dict,
astroid.Tuple,
astroid.objects.FrozenSet,
astroid.Set,
)
if not isinstance(inferred, known_objects):
# Might be an instance that knows how to handle this slice object
return
for _ in range(invalid_slices):
self.add_message("invalid-slice-index", node=node)
@check_messages("not-context-manager")
def visit_with(self, node):
for ctx_mgr, _ in node.items:
context = astroid.context.InferenceContext()
inferred = safe_infer(ctx_mgr, context=context)
if inferred is None or inferred is astroid.Uninferable:
continue
if isinstance(inferred, bases.Generator):
# Check if we are dealing with a function decorated
# with contextlib.contextmanager.
if decorated_with(
inferred.parent, self.config.contextmanager_decorators
):
continue
# If the parent of the generator is not the context manager itself,
# that means that it could have been returned from another
# function which was the real context manager.
# The following approach is more of a hack rather than a real
# solution: walk all the inferred statements for the
# given *ctx_mgr* and if you find one function scope
# which is decorated, consider it to be the real
# manager and give up, otherwise emit not-context-manager.
# See the test file for not_context_manager for a couple
# of self explaining tests.
# Retrieve node from all previusly visited nodes in the the inference history
context_path_names = filter(None, _unflatten(context.path))
inferred_paths = _flatten_container(
safe_infer(path) for path in context_path_names
)
for inferred_path in inferred_paths:
if not inferred_path:
continue
scope = inferred_path.scope()
if not isinstance(scope, astroid.FunctionDef):
continue
if decorated_with(scope, self.config.contextmanager_decorators):
break
else:
self.add_message(
"not-context-manager", node=node, args=(inferred.name,)
)
else:
try:
inferred.getattr("__enter__")
inferred.getattr("__exit__")
except exceptions.NotFoundError:
if isinstance(inferred, astroid.Instance):
# If we do not know the bases of this class,
# just skip it.
if not has_known_bases(inferred):
continue
# Just ignore mixin classes.
if self.config.ignore_mixin_members:
if inferred.name[-5:].lower() == "mixin":
continue
self.add_message(
"not-context-manager", node=node, args=(inferred.name,)
)
@check_messages("invalid-unary-operand-type")
def visit_unaryop(self, node):
"""Detect TypeErrors for unary operands."""
for error in node.type_errors():
# Let the error customize its output.
self.add_message("invalid-unary-operand-type", args=str(error), node=node)
@check_messages("unsupported-binary-operation")
def _visit_binop(self, node):
"""Detect TypeErrors for binary arithmetic operands."""
self._check_binop_errors(node)
@check_messages("unsupported-binary-operation")
def _visit_augassign(self, node):
"""Detect TypeErrors for augmented binary arithmetic operands."""
self._check_binop_errors(node)
def _check_binop_errors(self, node):
for error in node.type_errors():
# Let the error customize its output.
if any(
isinstance(obj, astroid.ClassDef) and not has_known_bases(obj)
for obj in (error.left_type, error.right_type)
):
continue
self.add_message("unsupported-binary-operation", args=str(error), node=node)
def _check_membership_test(self, node):
if is_inside_abstract_class(node):
return
if is_comprehension(node):
return
inferred = safe_infer(node)
if inferred is None or inferred is astroid.Uninferable:
return
if not supports_membership_test(inferred):
self.add_message(
"unsupported-membership-test", args=node.as_string(), node=node
)
@check_messages("unsupported-membership-test")
def visit_compare(self, node):
if len(node.ops) != 1:
return
op, right = node.ops[0]
if op in ["in", "not in"]:
self._check_membership_test(right)
    @check_messages(
        "unsubscriptable-object",
        "unsupported-assignment-operation",
        "unsupported-delete-operation",
        "unhashable-dict-key",
    )
    def visit_subscript(self, node):
        """Validate a subscript expression.

        Checks that dict literal keys are hashable, and that the subscripted
        value supports the protocol matching the expression context:
        __getitem__ for loads, __setitem__ for stores, __delitem__ for
        deletes.
        """
        supported_protocol = None
        if isinstance(node.value, (astroid.ListComp, astroid.DictComp)):
            return
        if isinstance(node.value, astroid.Dict):
            # Assert dict key is hashable
            inferred = safe_infer(node.slice.value)
            if inferred not in (None, astroid.Uninferable):
                try:
                    hash_fn = next(inferred.igetattr("__hash__"))
                except astroid.InferenceError:
                    pass
                else:
                    # A __hash__ of None marks the type as unhashable.
                    if getattr(hash_fn, "value", True) is None:
                        self.add_message("unhashable-dict-key", node=node.value)
        # Select the protocol and message matching the expression context.
        if node.ctx == astroid.Load:
            supported_protocol = supports_getitem
            msg = "unsubscriptable-object"
        elif node.ctx == astroid.Store:
            supported_protocol = supports_setitem
            msg = "unsupported-assignment-operation"
        elif node.ctx == astroid.Del:
            supported_protocol = supports_delitem
            msg = "unsupported-delete-operation"
        if isinstance(node.value, astroid.SetComp):
            # A set comprehension can never be subscripted.
            self.add_message(msg, args=node.value.as_string(), node=node.value)
            return
        if is_inside_abstract_class(node):
            return
        inferred = safe_infer(node.value)
        if inferred is None or inferred is astroid.Uninferable:
            return
        if not supported_protocol(inferred):
            self.add_message(msg, args=node.value.as_string(), node=node.value)
@check_messages("dict-items-missing-iter")
def visit_for(self, node):
if not isinstance(node.target, astroid.node_classes.Tuple):
# target is not a tuple
return
if not len(node.target.elts) == 2:
# target is not a tuple of two elements
return
iterable = node.iter
if not isinstance(iterable, astroid.node_classes.Name):
# it's not a bare variable
return
inferred = safe_infer(iterable)
if not inferred:
return
if not isinstance(inferred, astroid.node_classes.Dict):
# the iterable is not a dict
return
self.add_message("dict-iter-missing-items", node=node)
class IterableChecker(BaseChecker):
    """
    Checks for non-iterables used in an iterable context.
    Contexts include:
    - for-statement
    - starargs in function call
    - `yield from`-statement
    - list, dict and set comprehensions
    - generator expressions
    Also checks for non-mappings in function call kwargs.
    """

    __implements__ = (IAstroidChecker,)
    name = "typecheck"
    msgs = {
        "E1133": (
            "Non-iterable value %s is used in an iterating context",
            "not-an-iterable",
            "Used when a non-iterable value is used in place where "
            "iterable is expected",
        ),
        "E1134": (
            "Non-mapping value %s is used in a mapping context",
            "not-a-mapping",
            "Used when a non-mapping value is used in place where "
            "mapping is expected",
        ),
    }

    @staticmethod
    def _is_asyncio_coroutine(node):
        # A call produces an asyncio coroutine when the called function is
        # decorated with a decorator whose qualified name is asyncio.coroutine.
        if not isinstance(node, astroid.Call):
            return False
        func = safe_infer(node.func)
        if not isinstance(func, astroid.FunctionDef):
            return False
        if not func.decorators:
            return False
        return any(
            isinstance(dec, astroid.FunctionDef) and dec.qname() == ASYNCIO_COROUTINE
            for dec in map(safe_infer, func.decorators.nodes)
        )

    def _check_iterable(self, node, check_async=False):
        """Emit not-an-iterable if *node* infers to a non-iterable value."""
        # Abstract-class bodies and comprehension nodes are exempt.
        if is_inside_abstract_class(node) or is_comprehension(node):
            return
        inferred = safe_infer(node)
        if inferred and not is_iterable(inferred, check_async=check_async):
            self.add_message("not-an-iterable", args=node.as_string(), node=node)

    def _check_mapping(self, node):
        """Emit not-a-mapping if *node* infers to a non-mapping value."""
        if is_inside_abstract_class(node) or isinstance(node, astroid.DictComp):
            return
        inferred = safe_infer(node)
        if inferred is None or inferred is astroid.Uninferable:
            return
        if not is_mapping(inferred):
            self.add_message("not-a-mapping", args=node.as_string(), node=node)

    @check_messages("not-an-iterable")
    def visit_for(self, node):
        self._check_iterable(node.iter)

    @check_messages("not-an-iterable")
    def visit_asyncfor(self, node):
        self._check_iterable(node.iter, check_async=True)

    @check_messages("not-an-iterable")
    def visit_yieldfrom(self, node):
        # asyncio coroutines legitimately `yield from` awaitables.
        if self._is_asyncio_coroutine(node.value):
            return
        self._check_iterable(node.value)

    @check_messages("not-an-iterable", "not-a-mapping")
    def visit_call(self, node):
        for star_arg in node.starargs:
            self._check_iterable(star_arg.value)
        for double_star_arg in node.kwargs:
            self._check_mapping(double_star_arg.value)

    @check_messages("not-an-iterable")
    def visit_listcomp(self, node):
        for generator in node.generators:
            self._check_iterable(generator.iter, check_async=generator.is_async)

    @check_messages("not-an-iterable")
    def visit_dictcomp(self, node):
        for generator in node.generators:
            self._check_iterable(generator.iter, check_async=generator.is_async)

    @check_messages("not-an-iterable")
    def visit_setcomp(self, node):
        for generator in node.generators:
            self._check_iterable(generator.iter, check_async=generator.is_async)

    @check_messages("not-an-iterable")
    def visit_generatorexp(self, node):
        for generator in node.generators:
            self._check_iterable(generator.iter, check_async=generator.is_async)
def register(linter):
    """Required plugin entry point: attach this module's checkers to *linter*."""
    for checker_class in (TypeChecker, IterableChecker):
        linter.register_checker(checker_class(linter))
| 1 | 11,229 | I would hedge this a little, maybe `Positional arguments appear to be out of order`. After all, it's just a guess. | PyCQA-pylint | py |
@@ -192,7 +192,8 @@ public class AccountActivity extends ThemedActivity implements AccountContract.V
@Override
public void showError() {
- SnackBarHandler.show(coordinatorLayout, getString(no_account_signed_in));
+ Snackbar snackbar = SnackBarHandler.show(coordinatorLayout, getString(no_account_signed_in));
+ snackbar.show();
}
@Override | 1 | package org.fossasia.phimpme.accounts;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.os.Bundle;
import android.support.design.widget.BottomNavigationView;
import android.support.design.widget.CoordinatorLayout;
import android.support.design.widget.Snackbar;
import android.support.v7.app.AlertDialog;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.SwitchCompat;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.RelativeLayout;
import com.box.androidsdk.content.BoxConfig;
import com.box.androidsdk.content.auth.BoxAuthentication;
import com.box.androidsdk.content.models.BoxSession;
import com.cloudrail.si.CloudRail;
import com.dropbox.client2.DropboxAPI;
import com.dropbox.client2.android.AndroidAuthSession;
import com.pinterest.android.pdk.PDKCallback;
import com.pinterest.android.pdk.PDKClient;
import com.pinterest.android.pdk.PDKException;
import com.pinterest.android.pdk.PDKResponse;
import com.twitter.sdk.android.core.identity.TwitterAuthClient;
import org.fossasia.phimpme.R;
import org.fossasia.phimpme.base.PhimpmeProgressBarHandler;
import org.fossasia.phimpme.base.RecyclerItemClickListner;
import org.fossasia.phimpme.base.ThemedActivity;
import org.fossasia.phimpme.data.local.AccountDatabase;
import org.fossasia.phimpme.data.local.DatabaseHelper;
import org.fossasia.phimpme.gallery.activities.LFMainActivity;
import org.fossasia.phimpme.gallery.activities.SettingsActivity;
import org.fossasia.phimpme.gallery.util.AlertDialogsHelper;
import org.fossasia.phimpme.gallery.util.ThemeHelper;
import org.fossasia.phimpme.share.flickr.FlickrActivity;
import org.fossasia.phimpme.share.imgur.ImgurAuthActivity;
import org.fossasia.phimpme.share.nextcloud.NextCloudAuth;
import org.fossasia.phimpme.share.owncloud.OwnCloudActivity;
import org.fossasia.phimpme.share.twitter.LoginActivity;
import org.fossasia.phimpme.utilities.ActivitySwitchHelper;
import org.fossasia.phimpme.utilities.BasicCallBack;
import org.fossasia.phimpme.utilities.Constants;
import org.fossasia.phimpme.utilities.SnackBarHandler;
import org.jetbrains.annotations.NotNull;
import java.util.ArrayList;
import butterknife.BindView;
import butterknife.ButterKnife;
import io.realm.Realm;
import io.realm.RealmQuery;
import static com.pinterest.android.pdk.PDKClient.setDebugMode;
import static org.fossasia.phimpme.R.string.no_account_signed_in;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.BOX;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.DROPBOX;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.IMGUR;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.NEXTCLOUD;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.OWNCLOUD;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.PINTEREST;
import static org.fossasia.phimpme.utilities.Constants.BOX_CLIENT_ID;
import static org.fossasia.phimpme.utilities.Constants.BOX_CLIENT_SECRET;
import static org.fossasia.phimpme.utilities.Constants.PINTEREST_APP_ID;
import static org.fossasia.phimpme.utilities.Constants.SUCCESS;
import static org.fossasia.phimpme.utilities.Utils.checkNetwork;
/**
* Created by pa1pal on 13/6/17.
*/
public class AccountActivity extends ThemedActivity implements AccountContract.View,
RecyclerItemClickListner.OnItemClickListener{
private static final int NEXTCLOUD_REQUEST_CODE = 3;
private static final int OWNCLOUD_REQUEST_CODE = 9;
private static final int RESULT_OK = 1;
public static final String BROWSABLE = "android.intent.category.BROWSABLE";
public final static String CLOUDRAIL_APP_KEY = Constants.CLOUDRAIL_LICENSE_KEY;//CloudRail_App-Key
@BindView(R.id.accounts_parent)
RelativeLayout parentLayout;
@BindView(R.id.accounts_recycler_view)
RecyclerView accountsRecyclerView;
@BindView(R.id.toolbar)
Toolbar toolbar;
@BindView(R.id.bottombar)
BottomNavigationView bottomNavigationView;
@BindView(R.id.accounts)
CoordinatorLayout coordinatorLayout;
private AccountAdapter accountAdapter;
private AccountPresenter accountPresenter;
private Realm realm = Realm.getDefaultInstance();
private RealmQuery<AccountDatabase> realmResult;
private PhimpmeProgressBarHandler phimpmeProgressBarHandler;
private TwitterAuthClient client;
private AccountDatabase account;
private DatabaseHelper databaseHelper;
private Context context;
private CloudRailServices cloudRailServices;
private PDKClient pdkClient;
// private GoogleApiClient mGoogleApiClient;
private BoxSession sessionBox;
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
ButterKnife.bind(this);
ActivitySwitchHelper.setContext(this);
parentLayout.setBackgroundColor(getBackgroundColor());
overridePendingTransition(R.anim.right_to_left,
R.anim.left_to_right);
parentLayout.setBackgroundColor(getBackgroundColor());
accountAdapter = new AccountAdapter();
accountPresenter = new AccountPresenter(realm);
phimpmeProgressBarHandler = new PhimpmeProgressBarHandler(this);
accountPresenter.attachView(this);
databaseHelper = new DatabaseHelper(realm);
client = new TwitterAuthClient();
setSupportActionBar(toolbar);
ThemeHelper themeHelper = new ThemeHelper(getContext());
toolbar.setPopupTheme(getPopupToolbarStyle());
toolbar.setBackgroundColor(themeHelper.getPrimaryColor());
bottomNavigationView.setBackgroundColor(themeHelper.getPrimaryColor());
setUpRecyclerView();
accountPresenter.loadFromDatabase(); // Calling presenter function to load data from database
getSupportActionBar().setTitle(R.string.title_account);
phimpmeProgressBarHandler.show();
cloudRailServices=CloudRailServices.getInstance();
pdkClient = PDKClient.configureInstance(this, PINTEREST_APP_ID);
pdkClient.onConnect(this);
setDebugMode(true);
// googleApiClient();
configureBoxClient();
}
/* private void googleApiClient(){
// Configure sign-in to request the user's ID, email address, and basic
// profile. ID and basic profile are included in DEFAULT_SIGN_IN.
GoogleSignInOptions gso = new GoogleSignInOptions.Builder(GoogleSignInOptions.DEFAULT_SIGN_IN)
.requestEmail()
.build();
// Build a GoogleApiClient with access to the Google Sign-In API and the
// options specified by gso.
mGoogleApiClient = new GoogleApiClient.Builder(this)
.enableAutoManage(this, AccountActivity.this)
.addApi(Auth.GOOGLE_SIGN_IN_API, gso)
.build();
}*/
private void configureBoxClient() {
BoxConfig.CLIENT_ID = BOX_CLIENT_ID;
BoxConfig.CLIENT_SECRET = BOX_CLIENT_SECRET;
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.menu_accounts_activity, menu);
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId())
{
case R.id.action_account_settings:
startActivity(new Intent(AccountActivity.this, SettingsActivity.class));
return true;
}
return super.onOptionsItemSelected(item);
}
@Override
public void setUpRecyclerView() {
RecyclerView.LayoutManager layoutManager = new LinearLayoutManager(this);
accountsRecyclerView.setLayoutManager(layoutManager);
accountsRecyclerView.setAdapter(accountAdapter);
accountsRecyclerView.addOnItemTouchListener(new RecyclerItemClickListner(this, this));
}
@Override
public void setUpAdapter(@NotNull RealmQuery<AccountDatabase> accountDetails) {
this.realmResult = accountDetails;
accountAdapter.setResults(realmResult);
}
@Override
public void showError() {
SnackBarHandler.show(coordinatorLayout, getString(no_account_signed_in));
}
@Override
public void showComplete() {
phimpmeProgressBarHandler.hide();
}
@Override
public int getContentViewId() {
return R.layout.activity_accounts;
}
@Override
public int getNavigationMenuItemId() {
return R.id.navigation_accounts;
}
@Override
public void onItemClick(final View childView, final int position) {
if (!checkNetwork(this,parentLayout)) return;
final SwitchCompat signInSignOut = childView.findViewById(R.id.sign_in_sign_out_switch);
final String name = AccountDatabase.AccountName.values()[position].toString();
if (!signInSignOut.isChecked()) {
if (!checkNetwork(this, parentLayout)) return;
switch (AccountDatabase.AccountName.values()[position]) {
case TWITTER:
signInTwitter();
break;
/*case DRUPAL:
Intent drupalShare = new Intent(getContext(), DrupalLogin.class);
startActivity(drupalShare);
break;*/
case NEXTCLOUD:
Intent nextCloudShare = new Intent(getContext(), NextCloudAuth.class);
startActivityForResult(nextCloudShare, NEXTCLOUD_REQUEST_CODE);
break;
/*case WORDPRESS:
Intent WordpressShare = new Intent(this, WordpressLoginActivity.class);
startActivity(WordpressShare);
break;*/
/* case GOOGLEDRIVE:
signInGoogleDrive();
break;*/
case PINTEREST:
signInPinterest();
break;
case FLICKR:
signInFlickr();
break;
case IMGUR:
signInImgur();
break;
case DROPBOX:
if(CLOUDRAIL_APP_KEY==null || CLOUDRAIL_APP_KEY.equals(""))
{
Snackbar.make(findViewById(android.R.id.content),R.string.Cloudrail_License_key,Snackbar.LENGTH_SHORT).show();
}
else
signInDropbox();
break;
case OWNCLOUD:
Intent ownCloudShare = new Intent(getContext(), OwnCloudActivity.class);
startActivityForResult(ownCloudShare, OWNCLOUD_REQUEST_CODE);
break;
case BOX:
sessionBox = new BoxSession(AccountActivity.this);
sessionBox.authenticate();
break;
case TUMBLR:
//signInTumblr();
break;
/*case ONEDRIVE:
signInOneDrive();
break;*/
}
} else {
AlertDialog alertDialog = new AlertDialog.Builder(this)
.setMessage(name)
.setTitle(getString(R.string.sign_out_dialog_title))
.setPositiveButton(R.string.yes_action,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
databaseHelper
.deleteSignedOutAccount(name);
accountAdapter.notifyDataSetChanged();
accountPresenter.loadFromDatabase();
signInSignOut.setChecked(false);
BoxAuthentication.getInstance().logoutAllUsers(AccountActivity.this);
}
})
.setNegativeButton(R.string.no_action,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
//TODO: Implement negative button action
}
})
.create();
alertDialog.show();
AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), alertDialog);
}
}
private void signInFlickr() {
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
if (status == SUCCESS)
SnackBarHandler.show(coordinatorLayout, getString(R.string.logged_in_flickr));
}
};
Intent intent = new Intent(this, FlickrActivity.class);
FlickrActivity.setBasicCallBack(basicCallBack);
startActivity(intent);
}
/* private void signInTumblr() {
LoginListener loginListener = new LoginListener() {
@Override
public void onLoginSuccessful(com.tumblr.loglr.LoginResult loginResult) {
SnackBarHandler.show(coordinatorLayout, getString(R.string.logged_in_tumblr));
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class,
TUMBLR.toString());
account.setToken(loginResult.getOAuthToken());
account.setSecret(loginResult.getOAuthTokenSecret());
account.setUsername(TUMBLR.toString());
realm.commitTransaction();
TumblrClient tumblrClient = new TumblrClient();
realm.beginTransaction();
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
account.setUsername(data.toString());
realm.commitTransaction();
}
};
tumblrClient.getName(basicCallBack);
}
};
ExceptionHandler exceptionHandler = new ExceptionHandler() {
@Override
public void onLoginFailed(RuntimeException e) {
SnackBarHandler.show(coordinatorLayout, R.string.error_volly);
}
};
Loglr.getInstance()
.setConsumerKey(Constants.TUMBLR_CONSUMER_KEY)
.setConsumerSecretKey(Constants.TUMBLR_CONSUMER_SECRET)
.setLoginListener(loginListener)
.setExceptionHandler(exceptionHandler)
.enable2FA(true)
.setUrlCallBack(Constants.CALL_BACK_TUMBLR)
.initiateInActivity(AccountActivity.this);
}*/
private void signInDropbox() {
if (accountPresenter.checkAlreadyExist(DROPBOX))
SnackBarHandler.show(coordinatorLayout, R.string.already_signed_in);
else
cloudRailServices.prepare(this);
cloudRailServices.login();
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
if(status == 1)
{
dropboxAuthentication(data.toString());
}
}
};
CloudRailServices.setCallBack(basicCallBack);
}
/*
Catching the intent of the external browser login and getting that data
*/
@Override
protected void onNewIntent(Intent intent) {
try{
if(intent.getCategories().contains(BROWSABLE)){
CloudRail.setAuthenticationResponse(intent);
}
}catch (Exception e)
{
//Nothing is to be done when the BROWSABLE Intent is null
}
super.onNewIntent(intent);
}
/* private void signInGoogleDrive() {
if(accountPresenter.checkAlreadyExist(GOOGLEDRIVE))
SnackBarHandler.show(coordinatorLayout,"Already Signed In");
else
cloudRailServices.prepare(this);
cloudRailServices.googleDriveLogin();
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
if(status == 2){
Log.e("TAG", "callBack: GOOGLE DRIVE"+data.toString() );
googleDriveAuthentication(data.toString());
}
}
};
CloudRailServices.setCallBack(basicCallBack);
}*/
/* private void signInOneDrive(){
if(accountPresenter.checkAlreadyExist(ONEDRIVE))
SnackBarHandler.show(coordinatorLayout,"Already Signed In");
else
cloudRailServices.prepare(this);
cloudRailServices.oneDriveLogin();
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
if(status==3){
oneDriveAuthentication(data.toString());
}
}
};
CloudRailServices.setCallBack(basicCallBack);
}*/
private void signInImgur() {
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
if (status == SUCCESS) {
SnackBarHandler.show(coordinatorLayout, R.string.account_logged);
if (data instanceof Bundle) {
Bundle bundle = (Bundle) data;
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class, IMGUR.toString());
account.setUsername(bundle.getString(getString(R.string.auth_username)));
account.setToken(bundle.getString(getString(R.string.auth_token)));
realm.commitTransaction();
}
}
}
};
Intent i = new Intent(AccountActivity.this, ImgurAuthActivity.class);
ImgurAuthActivity.setBasicCallBack(basicCallBack);
startActivity(i);
}
private void signInPinterest() {
ArrayList<String> scopes = new ArrayList<>();
scopes.add(PDKClient.PDKCLIENT_PERMISSION_READ_PUBLIC);
scopes.add(PDKClient.PDKCLIENT_PERMISSION_WRITE_PUBLIC);
scopes.add(PDKClient.PDKCLIENT_PERMISSION_READ_RELATIONSHIPS);
scopes.add(PDKClient.PDKCLIENT_PERMISSION_WRITE_RELATIONSHIPS);
pdkClient.login(this, scopes, new PDKCallback() {
@Override
public void onSuccess(PDKResponse response) {
Log.d(getClass().getName(), response.getData().toString());
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class, PINTEREST.toString());
account.setAccountname(PINTEREST);
account.setUsername(response.getUser().getFirstName() + " " + response.getUser().getLastName());
realm.commitTransaction();
finish();
startActivity(getIntent());
SnackBarHandler.show(coordinatorLayout, getString(R.string.account_logged_pinterest));
}
@Override
public void onFailure(PDKException exception) {
Log.e(getClass().getName(), exception.getDetailMessage());
SnackBarHandler.show(coordinatorLayout, R.string.pinterest_signIn_fail);
}
});
}
@Override
public void onItemLongPress(View childView, int position) {
// TODO: long press to implemented
}
/**
* Create twitter login and session
*/
public void signInTwitter() {
Intent i = new Intent(AccountActivity.this, LoginActivity.class);
startActivity(i);
}
/**
* Create Facebook login and session
*/
/* public void signInFacebook() {
List<String> permissionNeeds = Arrays.asList("publish_actions");
loginManager = LoginManager.getInstance();
loginManager.logInWithPublishPermissions(this, permissionNeeds);
//loginManager.logInWithReadPermissions(this, Arrays.asList("email", "public_profile"));
loginManager.registerCallback(callbackManager,
new FacebookCallback<LoginResult>() {
@Override
public void onSuccess(LoginResult loginResult) {
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class, FACEBOOK.toString());
account.setUsername(loginResult.getAccessToken().getUserId());
GraphRequest request = GraphRequest.newMeRequest(
loginResult.getAccessToken(),
new GraphRequest.GraphJSONObjectCallback() {
@Override
public void onCompleted(@NonNls JSONObject jsonObject, GraphResponse graphResponse) {
Log.v("LoginActivity", graphResponse.toString());
try {
account.setUsername(jsonObject.getString("name"));
realm.commitTransaction();
SnackBarHandler.show(coordinatorLayout, getString(R.string.logged_in_facebook));
} catch (JSONException e) {
Log.e("LoginAct", e.toString());
}
}
});
Bundle parameters = new Bundle();
parameters.putString("fields", "id,name");
request.setParameters(parameters);
request.executeAsync();
}
@Override
public void onCancel() {
SnackBarHandler.show(coordinatorLayout, getString(R.string.facebook_login_cancel));
}
@Override
public void onError(FacebookException e) {
SnackBarHandler.show(coordinatorLayout, getString(R.string.facebook_login_error));
Log.d("error", e.toString());
}
});
}*/
@Override
public Context getContext() {
this.context = this;
return context;
}
@Override
public void onResume() {
super.onResume();
ActivitySwitchHelper.setContext(this);
setNavigationBarColor(ThemeHelper.getPrimaryColor(this));
toolbar.setBackgroundColor(ThemeHelper.getPrimaryColor(this));
//dropboxAuthentication();
boxAuthentication();
setStatusBarColor();
setNavBarColor();
accountPresenter.loadFromDatabase();
accountAdapter.updateTheme();
accountAdapter.notifyDataSetChanged();
}
@Override
public void onBackPressed() {
Intent intent = new Intent(this, LFMainActivity.class);
startActivity(intent);
finish();
overridePendingTransition(R.anim.left_to_right,
R.anim.right_to_left);
}
private void boxAuthentication() {
if (sessionBox != null && sessionBox.getUser() != null) {
String accessToken = sessionBox.getAuthInfo().accessToken();
realm.beginTransaction();
// Creating Realm object for AccountDatabase Class
account = realm.createObject(AccountDatabase.class,
BOX.toString());
// Writing values in Realm database
account.setUsername(sessionBox.getUser().getName());
account.setToken(String.valueOf(accessToken));
// Finally committing the whole data
realm.commitTransaction();
accountPresenter.loadFromDatabase();
}
}
private void dropboxAuthentication(String tokens) {
try{
String result = cloudRailServices.db.saveAsString();
Log.d("AccountsActivity", "dropboxAuthentication: "+tokens +" "+result);
String accessToken = cloudRailServices.getToken();
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class, DROPBOX.toString());
account.setUsername(DROPBOX.toString());
account.setToken(String.valueOf(accessToken));
realm.commitTransaction();
}catch (Exception e )
{
//catches exception dont need handling
}
accountPresenter.loadFromDatabase();
}
/* private void oneDriveAuthentication(String tokens){
try {
String result = cloudRailServices.oneDrive.saveAsString();
Log.d("AccountsActivity", "oneDriveAuthentication: "+tokens+" "+result );
String accessToken = cloudRailServices.getOneDriveToken();
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class,ONEDRIVE.toString());
account.setUsername(ONEDRIVE.toString());
account.setToken(String.valueOf(accessToken));
realm.commitTransaction();
}
catch (Exception e){
//No need of handling it
}
accountPresenter.loadFromDatabase();
}*/
/* private void googleDriveAuthentication(String tokens) {
try{
String token = cloudRailServices.googleDrive.saveAsString();
Log.e("AccountsActivity", "googleDriveAuthentication: "+token + "Matching Token "+tokens);
String accessToken = cloudRailServices.getGoogleDriveToken();
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class,GOOGLEDRIVE.toString());
account.setUsername(GOOGLEDRIVE.toString());
account.setToken(String.valueOf(accessToken));
realm.commitTransaction();
}catch (Exception e)
{
//No need for handling
}
accountPresenter.loadFromDatabase();
}*/
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
client.onActivityResult(requestCode, resultCode, data);
// callbackManager.onActivityResult(requestCode, resultCode, data);
pdkClient.onOauthResponse(requestCode, resultCode, data);
if ((requestCode == OWNCLOUD_REQUEST_CODE && resultCode == RESULT_OK) || (requestCode == NEXTCLOUD_REQUEST_CODE && resultCode == RESULT_OK)) {
realm.beginTransaction();
if (requestCode == NEXTCLOUD_REQUEST_CODE) {
account = realm.createObject(AccountDatabase.class, NEXTCLOUD.toString());
} else {
account = realm.createObject(AccountDatabase.class, OWNCLOUD.toString());
}
account.setServerUrl(data.getStringExtra(getString(R.string.server_url)));
account.setUsername(data.getStringExtra(getString(R.string.auth_username)));
account.setPassword(data.getStringExtra(getString(R.string.auth_password)));
realm.commitTransaction();
}
/* if (requestCode == RC_SIGN_IN) {
GoogleSignInResult result = Auth.GoogleSignInApi.getSignInResultFromIntent(data);
handleSignInResult(result);
}*/
}
/*private void handleSignInResult(GoogleSignInResult result) {
if (result.isSuccess()) {
GoogleSignInAccount acct = result.getSignInAccount();//acct.getDisplayName()
SnackBarHandler.show(parentLayout,R.string.success);
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class, GOOGLEPLUS.name());account.setUsername(acct.getDisplayName());
account.setUserId(acct.getId());
realm.commitTransaction();
} else {
SnackBarHandler.show(parentLayout,R.string.google_auth_fail);
}
}*/
}
| 1 | 13,022 | you can directly call show() method from **SnackBarHandler.show(coordinatorLayout, getString(no_account_signed_in)).show();** no need to create an extra object and then call the show method. | fossasia-phimpme-android | java |
@@ -306,6 +306,19 @@ StatusOr<std::vector<HostAddr>> NetworkUtils::toHosts(const std::string& peersSt
return hosts;
}
+std::string NetworkUtils::toHosts(const std::vector<HostAddr>& hosts) {
+ std::string hostsString = "";
+ for (auto& host : hosts) {
+ std::string addrStr = network::NetworkUtils::ipFromHostAddr(host);
+ int32_t port = network::NetworkUtils::portFromHostAddr(host);
+ hostsString += folly::stringPrintf("%s:%d, ", addrStr.c_str(), port);
+ }
+ if (!hostsString.empty()) {
+ hostsString.resize(hostsString.size() - 2);
+ }
+ return hostsString;
+}
+
std::string NetworkUtils::ipFromHostAddr(const HostAddr& host) {
return intToIPv4(host.first);
} | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "network/NetworkUtils.h"
#include <netdb.h>
#include <ifaddrs.h>
#include <arpa/inet.h>
#include "fs/FileUtils.h"
namespace nebula {
namespace network {
static const int32_t kMaxHostNameLen = 256;
std::string NetworkUtils::getHostname() {
char hn[kMaxHostNameLen];
if (gethostname(hn, kMaxHostNameLen) < 0) {
LOG(ERROR) << "gethostname error : " << strerror(errno);
return "";
}
return std::string(hn);
}
StatusOr<std::string> NetworkUtils::getIPv4FromDevice(const std::string &device) {
if (device == "any") {
return "0.0.0.0";
}
auto result = listDeviceAndIPv4s();
if (!result.ok()) {
return std::move(result).status();
}
auto iter = result.value().find(device);
if (iter == result.value().end()) {
return Status::Error("No IPv4 address found for `%s'", device.c_str());
}
return iter->second;
}
StatusOr<std::vector<std::string>> NetworkUtils::listIPv4s() {
auto result = listDeviceAndIPv4s();
if (!result.ok()) {
return std::move(result).status();
}
auto getval = [] (const auto &entry) {
return entry.second;
};
std::vector<std::string> ipv4s;
ipv4s.resize(result.value().size());
std::transform(result.value().begin(), result.value().end(), ipv4s.begin(), getval);
return ipv4s;
}
StatusOr<std::unordered_map<std::string, std::string>> NetworkUtils::listDeviceAndIPv4s() {
struct ifaddrs *iflist;
std::unordered_map<std::string, std::string> dev2ipv4s;
if (::getifaddrs(&iflist) != 0) {
return Status::Error("%s", ::strerror(errno));
}
for (auto *ifa = iflist; ifa != nullptr; ifa = ifa->ifa_next) {
// Skip non-IPv4 devices
if (ifa->ifa_addr->sa_family != AF_INET) {
continue;
}
auto *addr = reinterpret_cast<struct sockaddr_in*>(ifa->ifa_addr);
// inet_ntoa is thread safe but not re-entrant,
// we could use inet_ntop instead when we need support for IPv6
dev2ipv4s[ifa->ifa_name] = ::inet_ntoa(addr->sin_addr);
}
::freeifaddrs(iflist);
if (dev2ipv4s.empty()) {
return Status::Error("No IPv4 devices found");
}
return dev2ipv4s;
}
bool NetworkUtils::getDynamicPortRange(uint16_t& low, uint16_t& high) {
FILE* pipe = popen("cat /proc/sys/net/ipv4/ip_local_port_range", "r");
if (!pipe) {
LOG(ERROR) << "Failed to open /proc/sys/net/ipv4/ip_local_port_range: "
<< strerror(errno);
return false;
}
if (fscanf(pipe, "%hu %hu", &low, &high) != 2) {
LOG(ERROR) << "Failed to read from /proc/sys/net/ipv4/ip_local_port_range";
// According to ICANN, the port range is devided into three sections
//
// Well-known ports: 0 to 1023 (used for system services)
// Registered/user ports: 1024 to 49151
// Dynamic/private ports: 49152 to 65535
low = 49152;
high = 65535;
}
if (pclose(pipe) < 0) {
LOG(ERROR) << "Failed to close the pipe: " << strerror(errno);
return false;
}
return true;
}
std::unordered_set<uint16_t> NetworkUtils::getPortsInUse() {
static const std::regex regex("[^:]+:[^:]+:([0-9A-F]+).+");
std::unordered_set<uint16_t> inUse;
{
fs::FileUtils::FileLineIterator iter("/proc/net/tcp", ®ex);
while (iter.valid()) {
auto &sm = iter.matched();
inUse.emplace(std::stoul(sm[1].str(), NULL, 16));
++iter;
}
}
{
fs::FileUtils::FileLineIterator iter("/proc/net/tcp6", ®ex);
while (iter.valid()) {
auto &sm = iter.matched();
inUse.emplace(std::stoul(sm[1].str(), NULL, 16));
++iter;
}
}
{
fs::FileUtils::FileLineIterator iter("/proc/net/udp", ®ex);
while (iter.valid()) {
auto &sm = iter.matched();
inUse.emplace(std::stoul(sm[1].str(), NULL, 16));
++iter;
}
}
{
fs::FileUtils::FileLineIterator iter("/proc/net/udp6", ®ex);
while (iter.valid()) {
auto &sm = iter.matched();
inUse.emplace(std::stoul(sm[1].str(), NULL, 16));
++iter;
}
}
{
fs::FileUtils::FileLineIterator iter("/proc/net/raw", ®ex);
while (iter.valid()) {
auto &sm = iter.matched();
inUse.emplace(std::stoul(sm[1].str(), NULL, 16));
++iter;
}
}
{
fs::FileUtils::FileLineIterator iter("/proc/net/raw6", ®ex);
while (iter.valid()) {
auto &sm = iter.matched();
inUse.emplace(std::stoul(sm[1].str(), NULL, 16));
++iter;
}
}
return inUse;
}
uint16_t NetworkUtils::getAvailablePort() {
uint16_t low = 0;
uint16_t high = 0;
CHECK(getDynamicPortRange(low, high))
<< "Failed to get the dynamic port range";
VLOG(1) << "Dynamic port range is [" << low << ", " << high << "]";
std::unordered_set<uint16_t> portsInUse = getPortsInUse();
uint16_t port = 0;
while (true) {
// NOTE
// The availablity of port number *outside* the ephemeral port range is
// relatively stable for the binding purpose.
port = folly::Random::rand32(1025, low);
if (portsInUse.find(port) != portsInUse.end()) {
continue;
}
if (portsInUse.find(port + 1) == portsInUse.end()) {
break;
}
}
return port;
}
bool NetworkUtils::ipv4ToInt(const std::string& ipStr, IPv4& ip) {
std::vector<folly::StringPiece> parts;
folly::split(".", ipStr, parts, true);
if (parts.size() != 4) {
return false;
}
ip = 0;
for (auto& s : parts) {
ip <<= 8;
try {
ip |= folly::to<uint8_t>(s);
} catch (const std::exception& ex) {
LOG(ERROR) << "Invalid ip string: \"" << ipStr << "\"";
return false;
}
}
return true;
}
std::string NetworkUtils::intToIPv4(IPv4 ip) {
static const std::vector<std::string> kDict{
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12",
"13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24",
"25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36",
"37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48",
"49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60",
"61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72",
"73", "74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84",
"85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96",
"97", "98", "99", "100", "101", "102", "103", "104", "105", "106",
"107", "108", "109", "110", "111", "112", "113", "114", "115", "116",
"117", "118", "119", "120", "121", "122", "123", "124", "125", "126",
"127", "128", "129", "130", "131", "132", "133", "134", "135", "136",
"137", "138", "139", "140", "141", "142", "143", "144", "145", "146",
"147", "148", "149", "150", "151", "152", "153", "154", "155", "156",
"157", "158", "159", "160", "161", "162", "163", "164", "165", "166",
"167", "168", "169", "170", "171", "172", "173", "174", "175", "176",
"177", "178", "179", "180", "181", "182", "183", "184", "185", "186",
"187", "188", "189", "190", "191", "192", "193", "194", "195", "196",
"197", "198", "199", "200", "201", "202", "203", "204", "205", "206",
"207", "208", "209", "210", "211", "212", "213", "214", "215", "216",
"217", "218", "219", "220", "221", "222", "223", "224", "225", "226",
"227", "228", "229", "230", "231", "232", "233", "234", "235", "236",
"237", "238", "239", "240", "241", "242", "243", "244", "245", "246",
"247", "248", "249", "250", "251", "252", "253", "254", "255"
};
auto& f1 = kDict[ip & 0x000000FF];
auto& f2 = kDict[(ip >> 8) & 0x000000FF];
auto& f3 = kDict[(ip >> 16) & 0x000000FF];
auto& f4 = kDict[(ip >> 24) & 0x000000FF];
char buf[16];
char* pt = buf;
strcpy(pt, f4.c_str()); // NOLINT
pt += f4.size();
*pt++ = '.';
strcpy(pt, f3.c_str()); // NOLINT
pt += f3.size();
*pt++ = '.';
strcpy(pt, f2.c_str()); // NOLINT
pt += f2.size();
*pt++ = '.';
strcpy(pt, f1.c_str()); // NOLINT
pt += f1.size();
return buf;
}
StatusOr<HostAddr> NetworkUtils::toHostAddr(folly::StringPiece ip, int32_t port) {
IPv4 ipV4;
if (!ipv4ToInt(ip.toString(), ipV4)) {
return Status::Error("Bad ip format:%s", ip.start());
}
return std::make_pair(ipV4, port);
}
StatusOr<HostAddr> NetworkUtils::toHostAddr(folly::StringPiece ipPort) {
auto pos = ipPort.find(':');
if (pos == folly::StringPiece::npos) {
return Status::Error("Bad peer format: %s", ipPort.start());
}
int32_t port;
try {
port = folly::to<int32_t>(ipPort.subpiece(pos + 1));
} catch (const std::exception& ex) {
return Status::Error("Bad port number, error: %s", ex.what());
}
return toHostAddr(ipPort.subpiece(0, pos), port);
}
StatusOr<std::vector<HostAddr>> NetworkUtils::toHosts(const std::string& peersStr) {
std::vector<HostAddr> hosts;
std::vector<std::string> peers;
folly::split(",", peersStr, peers, true);
hosts.reserve(peers.size());
for (auto& peerStr : peers) {
auto hostAddr = network::NetworkUtils::toHostAddr(folly::trimWhitespace(peerStr));
if (!hostAddr.ok()) {
return hostAddr.status();
}
hosts.emplace_back(hostAddr.value());
}
return hosts;
}
std::string NetworkUtils::ipFromHostAddr(const HostAddr& host) {
return intToIPv4(host.first);
}
int32_t NetworkUtils::portFromHostAddr(const HostAddr& host) {
return host.second;
}
StatusOr<std::string> NetworkUtils::getLocalIP(std::string defaultIP) {
if (!defaultIP.empty()) {
return defaultIP;
}
auto result = network::NetworkUtils::listDeviceAndIPv4s();
if (!result.ok()) {
return std::move(result).status();
}
for (auto& deviceIP : result.value()) {
if (deviceIP.second != "127.0.0.1") {
return deviceIP.second;
}
}
return Status::Error("No IPv4 address found!");
}
} // namespace network
} // namespace nebula
| 1 | 23,299 | concatHosts may be a better func name to explain this? | vesoft-inc-nebula | cpp |
@@ -0,0 +1,19 @@
+import { always } from 'ramda';
+
+/**
+ * A function that returns new empty string.
+ *
+ * @func stubString
+ * @memberOf RA
+ * @since {@link https://char0n.github.io/ramda-adjunct/2.1.0|v2.1.0}
+ * @category Function
+ * @sig ... -> ''
+ * @return {''}
+ * @example
+ *
+ * RA.stubString(); //=> ''
+ * RA.stubString(1, 2, 3); //=> ''
+ */
+const stubString = always('');
+
+export default stubString; | 1 | 1 | 4,971 | A function that returns empty string. | char0n-ramda-adjunct | js |
|
@@ -194,7 +194,12 @@ func parsePackage(state *core.BuildState, label, dependent core.BuildLabel, subr
}
} else {
filename, dir := buildFileName(state, label.PackageName, subrepo)
- if filename == "" {
+ if filename != "" {
+ pkg.Filename = filename
+ if err := state.Parser.ParseFile(state, pkg, pkg.Filename); err != nil {
+ return nil, err
+ }
+ } else if packageName != "" || state.Config.FeatureFlags.RemovePleasings {
exists := core.PathExists(dir)
// Handle quite a few cases to provide more obvious error messages.
if dependent != core.OriginalTarget && exists { | 1 | // Package parse implements handling parse tasks for BUILD files.
//
// The actual work to interpret them is done by the //src/parse/asp package; this
// package handles requests for parsing build targets and triggering them to
// start building when ready.
package parse
import (
"fmt"
"path"
"strings"
"gopkg.in/op/go-logging.v1"
"github.com/thought-machine/please/src/cli"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
)
var log = logging.MustGetLogger("parse")
// Parse parses the package corresponding to a single build label. The label can be :all to add all targets in a package.
// It is not an error if the package has already been parsed.
//
// By default, after the package is parsed, any targets that are now needed for the build and ready
// to be built are queued, and any new packages are queued for parsing. When a specific label is requested
// this is straightforward, but when parsing for pseudo-targets like :all and ..., various flags affect it:
// 'include' and 'exclude' refer to the labels of targets to be added. If 'include' is non-empty then only
// targets with at least one matching label are added. Any targets with a label in 'exclude' are not added.
// 'forSubinclude' is set when the parse is required for a subinclude target so should proceed
// even when we're not otherwise building targets.
func Parse(tid int, state *core.BuildState, label, dependent core.BuildLabel, forSubinclude bool) {
if err := parse(tid, state, label, dependent, forSubinclude); err != nil {
state.LogBuildError(tid, label, core.ParseFailed, err, "Failed to parse package")
}
}
func parse(tid int, state *core.BuildState, label, dependent core.BuildLabel, forSubinclude bool) error {
// See if something else has parsed this package first.
pkg := state.SyncParsePackage(label)
if pkg != nil {
// Does exist, all we need to do is toggle on this target
return activateTarget(tid, state, pkg, label, dependent, forSubinclude)
}
// If we get here then it falls to us to parse this package.
state.LogBuildResult(tid, label, core.PackageParsing, "Parsing...")
subrepo, err := checkSubrepo(tid, state, label, dependent, forSubinclude)
if err != nil {
return err
} else if subrepo != nil && subrepo.Target != nil {
// We have got the definition of the subrepo but it depends on something, make sure that has been built.
state.WaitForBuiltTarget(subrepo.Target.Label, label)
}
// Subrepo & nothing else means we just want to ensure that subrepo is present.
if label.Subrepo != "" && label.PackageName == "" && label.Name == "" {
return nil
}
pkg, err = parsePackage(state, label, dependent, subrepo)
if err != nil {
return err
}
state.LogBuildResult(tid, label, core.PackageParsed, "Parsed package")
return activateTarget(tid, state, pkg, label, dependent, forSubinclude)
}
// checkSubrepo checks whether this guy exists within a subrepo. If so we will need to make sure that's available first.
func checkSubrepo(tid int, state *core.BuildState, label, dependent core.BuildLabel, forSubinclude bool) (*core.Subrepo, error) {
if label.Subrepo == "" {
return nil, nil
} else if subrepo := state.Graph.Subrepo(label.Subrepo); subrepo != nil {
return subrepo, nil
}
// We don't have the definition of it at all. Need to parse that first.
sl := label.SubrepoLabel()
// Local subincludes are when we subinclude from a subrepo defined in the current package
localSubinclude := sl.PackageName == dependent.PackageName && forSubinclude
// If we're including from the same package, we don't want to parse the subrepo package
if !localSubinclude {
if handled, err := parseSubrepoPackage(tid, state, sl.PackageName, "", label); err != nil {
return nil, err
} else if !handled {
if _, err := parseSubrepoPackage(tid, state, sl.PackageName, dependent.Subrepo, label); err != nil {
return nil, err
}
}
}
if subrepo := state.Graph.Subrepo(label.Subrepo); subrepo != nil {
return subrepo, nil
} else if subrepo := checkArchSubrepo(state, label.Subrepo); subrepo != nil {
return subrepo, nil
}
if !localSubinclude {
// Fix for #577; fallback like above, it might be defined within the subrepo.
if handled, err := parseSubrepoPackage(tid, state, sl.PackageName, dependent.Subrepo, label); handled && err == nil {
return state.Graph.Subrepo(label.Subrepo), nil
}
return nil, fmt.Errorf("Subrepo %s is not defined (referenced by %s)", label.Subrepo, dependent)
}
// For local subincludes, the subrepo has to already be defined at this point in the BUILD file
return nil, fmt.Errorf("%s -> %s", dependent, label)
}
// parseSubrepoPackage parses a package to make sure subrepos are available.
func parseSubrepoPackage(tid int, state *core.BuildState, pkg, subrepo string, dependent core.BuildLabel) (bool, error) {
if state.Graph.Package(pkg, subrepo) == nil {
// Don't have it already, must parse.
label := core.BuildLabel{Subrepo: subrepo, PackageName: pkg, Name: "all"}
return true, parse(tid, state, label, dependent, true)
}
return false, nil
}
// checkArchSubrepo checks if a target refers to a cross-compiling subrepo.
// Those don't have to be explicitly defined - maybe we should insist on that, but it's nicer not to have to.
func checkArchSubrepo(state *core.BuildState, name string) *core.Subrepo {
var arch cli.Arch
if err := arch.UnmarshalFlag(name); err == nil {
return state.Graph.MaybeAddSubrepo(core.SubrepoForArch(state, arch))
}
return nil
}
// activateTarget marks a target as active (ie. to be built) and adds its dependencies as pending parses.
func activateTarget(tid int, state *core.BuildState, pkg *core.Package, label, dependent core.BuildLabel, forSubinclude bool) error {
if !label.IsAllTargets() && state.Graph.Target(label) == nil {
if label.Subrepo == "" && label.PackageName == "" && label.Name == dependent.Subrepo {
if subrepo := checkArchSubrepo(state, label.Name); subrepo != nil {
state.LogBuildResult(tid, label, core.TargetBuilt, "Instantiated subrepo")
return nil
}
}
if state.Config.Bazel.Compatibility && forSubinclude {
// Bazel allows some things that look like build targets but aren't - notably the syntax
// to load(). It suits us to treat that as though it is one, but we now have to
// implicitly make it available.
exportFile(state, pkg, label)
} else {
msg := fmt.Sprintf("Parsed build file %s but it doesn't contain target %s", pkg.Filename, label.Name)
if dependent != core.OriginalTarget {
msg += fmt.Sprintf(" (depended on by %s)", dependent)
}
return fmt.Errorf(msg + suggestTargets(pkg, label, dependent))
}
}
if state.ParsePackageOnly && !forSubinclude {
return nil // Some kinds of query don't need a full recursive parse.
} else if label.IsAllTargets() {
if dependent == core.OriginalTarget {
for _, target := range pkg.AllTargets() {
// Don't activate targets that were added in a post-build function; that causes a race condition
// between the post-build functions running and other things trying to activate them too early.
if state.ShouldInclude(target) && !target.AddedPostBuild {
// Must always do this for coverage because we need to calculate sources of
// non-test targets later on.
if !state.NeedTests || target.IsTest || state.NeedCoverage {
if err := state.QueueTarget(target.Label, dependent, false, dependent.IsAllTargets()); err != nil {
return err
}
}
}
}
}
} else {
for _, l := range state.Graph.DependentTargets(dependent, label) {
// We use :all to indicate a dependency needed for parse.
if err := state.QueueTarget(l, dependent, false, forSubinclude || dependent.IsAllTargets()); err != nil {
return err
}
}
}
return nil
}
// parsePackage parses a BUILD file and adds the package to the build graph.
// label identifies the package; dependent is the target whose dependency caused
// the parse (core.OriginalTarget when requested directly) and is used only for
// error messages; subrepo is non-nil when the package lives inside a subrepo.
// Fix: corrected the "faild" typo in the two internal-package error messages.
func parsePackage(state *core.BuildState, label, dependent core.BuildLabel, subrepo *core.Subrepo) (*core.Package, error) {
	packageName := label.PackageName
	pkg := core.NewPackage(packageName)
	pkg.Subrepo = subrepo
	if subrepo != nil {
		pkg.SubrepoName = subrepo.Name
	}
	if packageName == InternalPackageName {
		// The internal package is generated from config rather than read off disk.
		pkgStr, err := GetInternalPackage(state.Config)
		if err != nil {
			return nil, fmt.Errorf("failed to generate internal package: %w", err)
		}
		if err := state.Parser.ParseReader(state, pkg, strings.NewReader(pkgStr)); err != nil {
			return nil, fmt.Errorf("failed to parse internal package: %w", err)
		}
	} else {
		filename, dir := buildFileName(state, label.PackageName, subrepo)
		if filename == "" {
			exists := core.PathExists(dir)
			// Handle quite a few cases to provide more obvious error messages.
			if dependent != core.OriginalTarget && exists {
				return nil, fmt.Errorf("%s depends on %s, but there's no %s file in %s/", dependent, label, buildFileNames(state.Config.Parse.BuildFileName), dir)
			} else if dependent != core.OriginalTarget {
				return nil, fmt.Errorf("%s depends on %s, but the directory %s doesn't exist: %s", dependent, label, dir, packageName)
			} else if exists {
				return nil, fmt.Errorf("Can't build %s; there's no %s file in %s/", label, buildFileNames(state.Config.Parse.BuildFileName), dir)
			}
			return nil, fmt.Errorf("Can't build %s; the directory %s doesn't exist", label, dir)
		}
		pkg.Filename = filename
		if err := state.Parser.ParseFile(state, pkg, pkg.Filename); err != nil {
			return nil, err
		}
	}
	if !state.Config.FeatureFlags.RemovePleasings {
		// TODO(jpoole): delete this code branch in the v16 release
		// If the config setting is on, we "magically" register a default repo called @pleasings.
		if packageName == "" && subrepo == nil && state.Config.Parse.BuiltinPleasings && pkg.Target("pleasings") == nil {
			if _, err := state.Parser.(*aspParser).asp.ParseReader(pkg, strings.NewReader(pleasings)); err != nil {
				log.Fatalf("Failed to load pleasings: %s", err) // This shouldn't happen, of course.
			}
		}
	}
	// Verify some details of the output files in the background. Don't need to wait for this
	// since it only issues warnings sometimes.
	go pkg.VerifyOutputs()
	state.Graph.AddPackage(pkg) // Calling this means nobody else will add entries to pendingTargets for this package.
	return pkg, nil
}
// buildFileName returns the name of the BUILD file for a package, or the empty string if one
// doesn't exist. It also returns the directory that it looked in.
func buildFileName(state *core.BuildState, pkgName string, subrepo *core.Subrepo) (string, string) {
	config := state.Config
	if subrepo != nil {
		// Subrepos keep their own config and store their packages under their own directory.
		pkgName = subrepo.Dir(pkgName)
		config = subrepo.State.Config
	}
	// Bazel defines targets in its "external" package from its WORKSPACE file.
	// We will fake this by treating that as an actual package file...
	// TODO(peterebden): They may be moving away from their "external" nomenclature?
	// Fix: parenthesised the || so that the "workspace" case also requires Bazel
	// compatibility mode; previously && bound tighter than || and "workspace"
	// matched unconditionally.
	if state.Config.Bazel.Compatibility && (pkgName == "external" || pkgName == "workspace") {
		return "WORKSPACE", ""
	}
	for _, buildFileName := range config.Parse.BuildFileName {
		if filename := path.Join(core.RepoRoot, pkgName, buildFileName); fs.FileExists(filename) {
			return filename, pkgName
		}
	}
	return "", pkgName
}
// rescanDeps runs over all the changed targets and makes sure any newly added
// dependencies enter the build queue.
func rescanDeps(state *core.BuildState, changed map[*core.BuildTarget]struct{}) error {
	for target := range changed {
		// Register declared dependencies that the graph doesn't know about yet.
		if !state.Graph.AllDependenciesResolved(target) {
			for _, dep := range target.DeclaredDependencies() {
				state.Graph.AddDependency(target.Label, dep)
			}
		}
		// Re-queue targets that are in flight: past Inactive but not yet Built.
		if s := target.State(); s > core.Inactive && s < core.Built {
			if err := state.QueueTarget(target.Label, core.OriginalTarget, true, false); err != nil {
				return err
			}
		}
	}
	return nil
}
// This is the builtin subrepo for pleasings; it is parsed into the root package
// when Parse.BuiltinPleasings is enabled (see parsePackage).
// TODO(peterebden): Should really provide a github_archive builtin that knows how to construct
// the URL and strip_prefix etc.
const pleasings = `
http_archive(
    name = "pleasings",
    strip_prefix = "pleasings-master",
    urls = ["https://github.com/thought-machine/pleasings/archive/master.zip"],
)
`
// exportFile adds a single-file export target. This is primarily used for Bazel compat.
func exportFile(state *core.BuildState, pkg *core.Package, label core.BuildLabel) {
	target := core.NewBuildTarget(label)
	target.IsFilegroup = true
	target.Subrepo = pkg.Subrepo
	target.AddSource(core.NewFileLabel(label.Name, pkg))
	state.AddTarget(pkg, target)
}
| 1 | 9,604 | If it's not the root package, or if it is the root package but we don't add the psudo :pleasings rule there, print an error. | thought-machine-please | go |
@@ -17,7 +17,9 @@ import (
"strconv"
"github.com/go-playground/validator/v10"
+
corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation"
)
| 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package apivalidator
import (
"strconv"
"github.com/go-playground/validator/v10"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/validation"
)
// NamespaceSelectorsValid can be used to check whether namespace selectors is valid.
// Each namespace must be 1-63 characters long and pass the name check.
func NamespaceSelectorsValid(fl validator.FieldLevel) bool {
	namespaces, ok := fl.Field().Interface().([]string)
	if !ok {
		return false
	}
	for _, namespace := range namespaces {
		if len(namespace) == 0 || len(namespace) > 63 || !checkName(namespace) {
			return false
		}
	}
	return true
}
// MapSelectorsValid can be used to check whether map selectors is valid.
// A nil map is accepted; otherwise every key must be a qualified Kubernetes name.
func MapSelectorsValid(fl validator.FieldLevel) bool {
	if fl.Field().IsNil() {
		return true
	}
	selectors, ok := fl.Field().Interface().(map[string]string)
	if !ok {
		return false
	}
	for key := range selectors {
		if errs := validation.IsQualifiedName(key); len(errs) > 0 {
			return false
		}
	}
	return true
}
// PhaseSelectorsValid can be used to check whether phase selectors is valid.
// Every entry must match one of the Kubernetes pod phases.
func PhaseSelectorsValid(fl validator.FieldLevel) bool {
	phases, ok := fl.Field().Interface().([]string)
	if !ok {
		return false
	}
	for _, phase := range phases {
		if !checkPhase(phase) {
			return false
		}
	}
	return true
}
// ValueValid can be used to check whether the mode value is valid.
// An empty value is accepted; otherwise the value must parse as a float
// that is not below zero.
func ValueValid(fl validator.FieldLevel) bool {
	raw := fl.Field().String()
	if raw == "" {
		return true
	}
	parsed, err := strconv.ParseFloat(raw, 64)
	if err != nil {
		return false
	}
	// Same check as before: only values strictly below zero are rejected.
	return !(parsed < 0)
}
// checkPhase reports whether ph matches one of the Kubernetes pod phases.
// Fix: corev1.PodPending was previously listed twice in the slice.
func checkPhase(ph string) bool {
	phases := []corev1.PodPhase{
		corev1.PodPending,
		corev1.PodRunning,
		corev1.PodSucceeded,
		corev1.PodFailed,
		corev1.PodUnknown,
	}
	for _, phase := range phases {
		if string(phase) == ph {
			return true
		}
	}
	return false
}
// PodsValid can be used to check whether the pod name is valid.
// A nil map is accepted; otherwise every namespace and every pod name
// within it must pass the name check.
func PodsValid(fl validator.FieldLevel) bool {
	if fl.Field().IsNil() {
		return true
	}
	podsByNamespace, ok := fl.Field().Interface().(map[string][]string)
	if !ok {
		return false
	}
	for namespace, podNames := range podsByNamespace {
		if !checkName(namespace) {
			return false
		}
		for _, podName := range podNames {
			if !checkName(podName) {
				return false
			}
		}
	}
	return true
}
| 1 | 18,813 | can delete this line | chaos-mesh-chaos-mesh | go |
@@ -76,6 +76,18 @@ generic_data_store::generic_data_store(generic_data_reader *reader, model *m) :
if (opts->has_bool("verbose") && opts->get_bool("verbose")) {
m_verbose = true;
}
+
+ if (opts->has_string("use_tarball")) {
+ m_dir = m_reader->get_local_file_dir();
+ }
+
+ if (m_comm->get_num_models() != 1) {
+ if (m_master) {
+ std::cerr << "\nFATAL ERROR: data store classes currently assume there is\n"
+ << "a single model; please ask Dave Hysom to fix!\n\n";
+ }
+ exit(9);
+ }
}
void generic_data_store::get_minibatch_index_vector() { | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
////////////////////////////////////////////////////////////////////////////////
#include "lbann/data_store/generic_data_store.hpp"
#include "lbann/data_readers/data_reader.hpp"
#include "lbann/utils/options.hpp"
#include "lbann/models/model.hpp"
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <numeric>
namespace lbann {
// Constructor: wires the data store to its reader and model, caches the
// communicator geometry (master flag, rank, procs per model, MPI comm) and
// applies the relevant command line options from the global options database.
generic_data_store::generic_data_store(generic_data_reader *reader, model *m) :
    m_reader(reader),
    m_comm(m->get_comm()),
    m_my_minibatch_indices(nullptr),
    m_epoch(0),
    m_in_memory(true),
    m_model(m),
    m_dir(m_reader->get_file_dir()),
    m_extended_testing(false),
    m_is_subsidiary_store(false),
    m_cur_minibatch(1000000),
    m_is_setup(false),
    m_verbose(false)
{
  // A valid communicator is required by everything below.
  if (m_comm == nullptr) {
    std::stringstream err;
    err << __FILE__ << " " << __LINE__ << " :: "
        << " m_reader->get_comm is nullptr";
    throw lbann_exception(err.str());
  }

  m_master = m_comm->am_world_master();
  m_rank = m_comm->get_rank_in_model();
  m_np = m_comm->get_procs_per_model();
  m_mpi_comm = m_comm->get_model_comm().comm;
  set_name("generic_data_store");

  // Optional behavior toggles from the command line options database.
  options *opts = options::get();
  if (m_master) std::cerr << "generic_data_store::generic_data_store; np: " << m_np << "\n";
  if (opts->has_bool("extended_testing") && opts->get_bool("extended_testing")) {
    m_extended_testing = true;
  }
  if (opts->has_bool("local_disk") && opts->get_bool("local_disk")) {
    // Out-of-memory mode: data is staged on local disk instead of held in RAM.
    if (m_master) std::cerr << "running in out-of-memory mode\n";
    m_in_memory = false;
  }
  if (opts->has_bool("verbose") && opts->get_bool("verbose")) {
    m_verbose = true;
  }
}
void generic_data_store::get_minibatch_index_vector() {
size_t s2 = 0;
for (auto t1 : (*m_my_minibatch_indices)) {
s2 += t1.size();
}
m_my_minibatch_indices_v.reserve(s2);
for (auto t1 : (*m_my_minibatch_indices)) {
for (auto t2 : t1) {
m_my_minibatch_indices_v.push_back(t2);
}
}
}
// Assigns datastore ownership of each shuffled index round-robin by rank:
// index idx belongs to rank (idx % m_np). Fills m_num_samples with the
// per-rank counts and m_my_datastore_indices with this rank's indices.
// Fix: removed the unused local 'std::unordered_set<int> mine'.
void generic_data_store::get_my_datastore_indices() {
  m_num_samples.resize(m_np, 0);
  for (size_t j=0; j<m_shuffled_indices->size(); ++j) {
    int idx = (*m_shuffled_indices)[j];
    int owner = idx % m_np;
    m_num_samples[owner] += 1;
    if (owner == m_rank) {
      m_my_datastore_indices.insert(idx);
    }
  }
}
// Common setup: records the reader's shuffled indices, then runs the model in
// "collect indices" mode so this rank learns which global sample indices it
// will touch in generic_data_reader::fetch_data(). Subsidiary stores return
// early; their collection is handled elsewhere.
void generic_data_store::setup() {
  set_shuffled_indices( &(m_reader->get_shuffled_indices()) );
  set_num_global_indices();
  m_num_readers = m_reader->get_num_parallel_readers();

  if (m_master) {
    std::cerr << "data_reader type is: " << m_reader->get_type() << "\n";
  }

  if (is_subsidiary_store()) {
    return;
  }

  // get the set of global indices used by this processor in
  // generic_data_reader::fetch_data(). Note that these are
  // "original' indices, not shuffled indices, i.e, these indices
  // remain constant through all epochs
  if (m_master) { std::cerr << "calling m_model->collect_indices\n"; }
  m_reader->set_save_minibatch_entries(true);
  // Dispatch on the reader's role; any other role is a configuration error.
  if (m_reader->get_role() == "train") {
    m_model->collect_indices(execution_mode::training);
  } else if (m_reader->get_role() == "validate") {
    m_model->collect_indices(execution_mode::validation);
  } else if (m_reader->get_role() == "test") {
    m_model->collect_indices(execution_mode::testing);
  } else {
    std::stringstream s2;
    s2 << __FILE__ << " " << __LINE__ << " :: "
       << " bad role; should be train, test, or validate;"
       << " we got: " << m_reader->get_role();
    throw lbann_exception(s2.str());
  }
  m_reader->set_save_minibatch_entries(false);
  m_my_minibatch_indices = &(m_reader->get_minibatch_indices());
}
void generic_data_store::print_partitioned_indices() {
if (! m_master) {
return;
}
std::cerr << "\n\n=============================================\n"
<< "minibatch indices:\n";
for (size_t j=0; j<m_all_partitioned_indices.size(); j++) {
std::cerr << "===== P_"<<j<<"\n";
for (size_t i=0; i<m_all_partitioned_indices[j].size(); i++) {
std::cerr << " mb #" << i << " ";
for (size_t k=0; k<m_all_partitioned_indices[j][i].size(); k++) {
std::cerr << m_all_partitioned_indices[j][i][k] << " ";
}
std::cerr << "\n";
}
}
std::cerr << "=============================================\n\n";
}
// Returns the size in bytes of the file 'fn', optionally prefixed by 'dir'.
// Throws lbann_exception if the file cannot be stat'ed.
// NOTE(review): the empty check tests the member m_dir but the concatenation
// uses the 'dir' parameter; this only behaves consistently if callers always
// pass dir == m_dir — confirm, otherwise this is a latent bug. Also note that
// 'dir + fn' inserts no path separator, so 'dir' must already end in '/'.
size_t generic_data_store::get_file_size(std::string dir, std::string fn) {
  std::string imagepath;
  if (m_dir == "") {
    imagepath = fn;
  } else {
    imagepath = dir + fn;
  }
  struct stat st;
  if (stat(imagepath.c_str(), &st) != 0) {
    std::stringstream err;
    err << __FILE__ << " " << __LINE__ << " :: "
        << "stat failed for dir: " << dir
        << " and fn: " << fn;
    throw lbann_exception(err.str());
  }
  return st.st_size;
}
// Records the new shuffled-index vector and advances the epoch counter.
// From the second epoch onward (in-memory mode, when requested) the data is
// re-exchanged to match the new shuffle.
void generic_data_store::set_shuffled_indices(const std::vector<int> *indices, bool exchange_indices) {
  m_shuffled_indices = indices;
  ++m_epoch;
  bool should_exchange = (m_epoch > 1) && exchange_indices && m_in_memory;
  if (should_exchange) {
    exchange_data();
  }
}
// Gathers every rank's minibatch-index count into m_mb_counts.
void generic_data_store::exchange_mb_counts() {
  int local_count = m_my_minibatch_indices_v.size();
  m_mb_counts.resize(m_np);
  m_comm->model_all_gather<int>(local_count, m_mb_counts);
}
void generic_data_store::exchange_mb_indices() {
exchange_mb_counts();
//setup data structures to exchange minibatch indices with all processors
//displacement vector
std::vector<int> displ(m_np);
displ[0] = 0;
for (size_t j=1; j<m_mb_counts.size(); j++) {
displ[j] = displ[j-1] + m_mb_counts[j-1];
}
//recv vector
int n = std::accumulate(m_mb_counts.begin(), m_mb_counts.end(), 0);
std::vector<int> all_indices(n);
//receive the indices
m_comm->all_gather<int>(m_my_minibatch_indices_v, all_indices, m_mb_counts, displ, m_comm->get_world_comm());
//fill in the final data structure
m_all_minibatch_indices.resize(m_np);
for (int j=0; j<m_np; j++) {
m_all_minibatch_indices[j].reserve(m_mb_counts[j]);
for (int i=displ[j]; i<displ[j]+m_mb_counts[j]; i++) {
m_all_minibatch_indices[j].push_back(all_indices[i]);
}
}
}
// Exchanges the per-minibatch index partition of every rank so that all ranks
// hold the full picture in m_all_partitioned_indices. Each rank serializes
// its lists as: [num_minibatches, mb0_size, mb0_indices..., mb1_size, ...],
// all-gathers the buffers, then unpacks them per rank.
// Fix: removed the unused local 'std::vector<int> v'.
void generic_data_store::exchange_partitioned_indices() {
  // determine the largest number of minibatches over all processors
  std::vector<int> counts(m_np);
  int my_num_mb = m_my_minibatch_indices->size();
  m_comm->model_all_gather<int>(my_num_mb, counts);
  m_num_minibatches = 0;
  for (auto t : counts) {
    m_num_minibatches = (size_t)t > m_num_minibatches ? t : m_num_minibatches;
  }
  if (m_master) std::cerr << "num minibatches: " << m_num_minibatches << "\n";

  // pack m_my_minibatch_indices into a single vector;
  // first, compute vector size, and exchange size with all procs
  int count = m_my_minibatch_indices->size() + 1;
  for (auto t : (*m_my_minibatch_indices)) {
    count += t.size();
  }
  m_comm->model_all_gather<int>(count, counts);

  // now, fill in the vector
  std::vector<int> w;
  w.reserve(count);
  w.push_back(m_my_minibatch_indices->size());
  for (auto t : (*m_my_minibatch_indices)) {
    w.push_back(t.size());
    for (size_t h=0; h<t.size(); h++) {
      w.push_back(t[h]);
    }
  }
  // sanity check: the serialized length must match the precomputed count
  if (w.size() != (size_t)count) {
    std::stringstream err;
    err << "count: " << count << " w.size: " << w.size();
    throw lbann_exception(err.str());
  }

  // exchange the vectors
  std::vector<int> displ(m_np);
  displ[0] = 0;
  for (size_t k=1; k<counts.size(); k++) {
    displ[k] = displ[k-1] + counts[k-1];
  }

  // construct recv vector
  int n = std::accumulate(counts.begin(), counts.end(), 0);
  std::vector<int> all_w(n);

  // exchange the indices
  m_comm->all_gather<int>(w, all_w, counts, displ, m_comm->get_world_comm());

  // fill in the final data structure
  m_all_partitioned_indices.resize(m_np);
  for (size_t p=0; p<(size_t)m_np; p++) {
    int *ww = all_w.data() + displ[p];
    // note: it's possible that m_num_minibatches > num_minibatches;
    //       that's OK; for simplicity elsewhere in the code we want
    //       all procs to have the same number of minibatches
    m_all_partitioned_indices[p].resize(m_num_minibatches);
    size_t num_minibatches = *ww++;
    for (size_t i=0; i<num_minibatches; i++) {
      int mb_size = *ww++;
      m_all_partitioned_indices[p][i].reserve(mb_size);
      for (int j=0; j<mb_size; j++) {
        m_all_partitioned_indices[p][i].push_back(*ww++);
      }
    }
  }
}
// Prepares the next minibatch; only out-of-memory mode needs to fetch data.
void generic_data_store::init_minibatch() {
  if (m_in_memory) {
    return;
  }
  fetch_data();
}
} // namespace lbann
| 1 | 12,721 | Will it still be an issue if all the models use the same data set? | LLNL-lbann | cpp |
@@ -37,6 +37,8 @@ class WidgetsController < ApplicationController
def handle_xml_format
return unless request_format == 'xml'
@type = WIDGET_TYPES.select { |klass| controller_name.include?(klass) }[0]
+ @parent = @widget.send(@type)
+ fail ParamRecordNotFound unless @parent
render template: 'widgets/metadata'
end
end | 1 | class WidgetsController < ApplicationController
WIDGET_TYPES = %w(account project stack organization)
helper :widgets
before_action :set_widget, except: :index
rescue_from ActiveRecord::RecordNotFound, with: :record_not_found
layout :false, except: :index
before_action :handle_xml_format, except: :index
private
def record_not_found
render text: I18n.t('widgets.not_found')
end
def set_widget
widget_name = action_name.split('_') - WIDGET_TYPES
@widget = Object.const_get("#{controller_name.camelize[0..-2]}::#{widget_name.join('_').camelize}").new(params)
end
def render_image_for_gif_format
return unless request.format.gif?
send_data(@widget.image, disposition: 'inline', type: 'image/gif', filename: 'widget.gif', status: 200)
end
def render_not_supported_for_gif_format
return unless request.format.gif?
image = WidgetBadge::Thin.create([text: 'Not supported'])
send_data(image, disposition: 'inline', type: 'image/gif', filename: 'widget.gif', status: 406)
end
def render_iframe_for_js_format
return unless request.format.js?
render :iframe
end
def handle_xml_format
return unless request_format == 'xml'
@type = WIDGET_TYPES.select { |klass| controller_name.include?(klass) }[0]
render template: 'widgets/metadata'
end
end
| 1 | 7,923 | How does this controller_name method work? Does this method grab the context of the url request? Wouldn't this always result in 'widget'? | blackducksoftware-ohloh-ui | rb |
@@ -18,6 +18,9 @@
* limitations under the License.
*/
+#define FS_CHUNK_SIZE_DEBUG(op) {flb_debug("[%d] %s -> fs_chunks_size = %zu", __LINE__, op->name, op->fs_chunks_size);}
+#define FS_CHUNK_SIZE_DEBUG_MOD(op, chunk, mod) {flb_debug("[%d] %s -> fs_chunks_size = %zu mod=%zd chunk=%s", __LINE__, op->name, op->fs_chunks_size, mod, flb_input_chunk_get_name(chunk));}
+
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_input.h>
#include <fluent-bit/flb_input_chunk.h> | 1 | /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* Fluent Bit
* ==========
* Copyright (C) 2019-2021 The Fluent Bit Authors
* Copyright (C) 2015-2018 Treasure Data Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_input.h>
#include <fluent-bit/flb_input_chunk.h>
#include <fluent-bit/flb_input_plugin.h>
#include <fluent-bit/flb_storage.h>
#include <fluent-bit/flb_time.h>
#include <fluent-bit/flb_router.h>
#include <fluent-bit/flb_task.h>
#include <fluent-bit/flb_routes_mask.h>
#include <fluent-bit/flb_metrics.h>
#include <fluent-bit/stream_processor/flb_sp.h>
#include <chunkio/chunkio.h>
#define BLOCK_UNTIL_KEYPRESS() {char temp_keypress_buffer; read(0, &temp_keypress_buffer, 1);}
#define FLB_INPUT_CHUNK_RELEASE_SCOPE_LOCAL 0
#define FLB_INPUT_CHUNK_RELEASE_SCOPE_GLOBAL 1
extern ssize_t sb_get_releasable_output_queue_space(struct flb_output_instance *output_plugin,
size_t required_space);
extern int sb_release_output_queue_space(struct flb_output_instance *output_plugin,
size_t required_space);
static int flb_input_chunk_safe_delete(struct flb_input_chunk *ic,
struct flb_input_chunk *old_ic,
uint64_t o_id);
static int flb_input_chunk_is_task_safe_delete(struct flb_task *task);
static ssize_t flb_input_chunk_get_real_size(struct flb_input_chunk *ic);
/*
 * Walks the chunks owned by 'input_plugin' that are routed to 'output_plugin'
 * and sums the real size of every chunk that could be safely dropped,
 * stopping once 'required_space' bytes are accounted for. Returns the number
 * of releasable bytes found, which may be less than required.
 */
static ssize_t flb_input_chunk_get_releasable_space(
    struct flb_input_chunk *new_input_chunk,
    struct flb_input_instance *input_plugin,
    struct flb_output_instance *output_plugin,
    size_t required_space)
{
    ssize_t releasable_space = 0;
    struct flb_input_chunk *candidate;
    struct mk_list *head;

    mk_list_foreach(head, &input_plugin->chunks) {
        candidate = mk_list_entry(head, struct flb_input_chunk, _head);

        /* skip chunks not routed to this output */
        if (!flb_routes_mask_get_bit(candidate->routes_mask, output_plugin->id)) {
            continue;
        }

        /* skip chunks that cannot be safely deleted right now */
        if (flb_input_chunk_safe_delete(new_input_chunk, candidate,
                                        output_plugin->id) == FLB_FALSE ||
            flb_input_chunk_is_task_safe_delete(candidate->task) == FLB_FALSE) {
            continue;
        }

        releasable_space += flb_input_chunk_get_real_size(candidate);

        if (releasable_space >= required_space) {
            break;
        }
    }

    return releasable_space;
}
/*
 * Releases up to 'required_space' bytes held by 'input_plugin' chunks that are
 * routed to 'output_plugin'.
 *
 * In LOCAL scope the output's route bit is cleared on each candidate chunk and
 * the chunk is destroyed only when no routes remain; in GLOBAL scope every
 * candidate chunk is destroyed outright.
 *
 * Returns 0 when at least 'required_space' bytes were released, -2 otherwise.
 */
static int flb_input_chunk_release_space(
    struct flb_input_chunk *new_input_chunk,
    struct flb_input_instance *input_plugin,
    struct flb_output_instance *output_plugin,
    ssize_t required_space,
    int release_scope)
{
    struct mk_list *input_chunk_iterator_tmp;
    struct mk_list *input_chunk_iterator;
    int chunk_destroy_flag;
    struct flb_input_chunk *old_input_chunk;
    ssize_t released_space;
    int chunk_released;
    ssize_t chunk_size;

    released_space = 0;

    /* Safe iteration: chunks may be destroyed while walking the list. */
    mk_list_foreach_safe(input_chunk_iterator, input_chunk_iterator_tmp,
                         &input_plugin->chunks) {
        old_input_chunk = mk_list_entry(input_chunk_iterator,
                                        struct flb_input_chunk, _head);

        /* Only consider chunks routed to this output. */
        if (!flb_routes_mask_get_bit(old_input_chunk->routes_mask,
                                     output_plugin->id)) {
            continue;
        }

        /* Skip the incoming chunk itself and chunks held by in-flight tasks. */
        if (flb_input_chunk_safe_delete(new_input_chunk,
                                        old_input_chunk,
                                        output_plugin->id) == FLB_FALSE ||
            flb_input_chunk_is_task_safe_delete(old_input_chunk->task) == FLB_FALSE) {
            continue;
        }

        chunk_size = flb_input_chunk_get_real_size(old_input_chunk);
        chunk_released = FLB_FALSE;
        chunk_destroy_flag = FLB_FALSE;

        if (release_scope == FLB_INPUT_CHUNK_RELEASE_SCOPE_LOCAL) {
            /* Detach the chunk from this output only; it is destroyed below
             * when that was its last remaining route.
             */
            flb_routes_mask_clear_bit(old_input_chunk->routes_mask,
                                      output_plugin->id);
            output_plugin->fs_chunks_size -= chunk_size;

            chunk_destroy_flag = flb_routes_mask_is_empty(
                                        old_input_chunk->routes_mask);
            chunk_released = FLB_TRUE;
        }
        else if (release_scope == FLB_INPUT_CHUNK_RELEASE_SCOPE_GLOBAL) {
            chunk_destroy_flag = FLB_TRUE;
        }

        if (chunk_destroy_flag) {
            if (old_input_chunk->task != NULL) {
                /*
                 * If the chunk is referenced by a task and task has no active route,
                 * we need to destroy the task as well.
                 */
                if (old_input_chunk->task->users == 0) {
                    flb_debug("[task] drop task_id %d with no active route from input plugin %s",
                              old_input_chunk->task->id, new_input_chunk->in->name);
                    flb_task_destroy(old_input_chunk->task, FLB_TRUE);
                    chunk_released = FLB_TRUE;
                }
            }
            else {
                flb_debug("[input chunk] drop chunk %s with no output route from input plugin %s",
                          flb_input_chunk_get_name(old_input_chunk), new_input_chunk->in->name);
                flb_input_chunk_destroy(old_input_chunk, FLB_TRUE);
                chunk_released = FLB_TRUE;
            }
        }

        if (chunk_released) {
            released_space += chunk_size;
        }

        if (released_space >= required_space) {
            break;
        }
    }

    if (released_space < required_space) {
        return -2;
    }

    return 0;
}
static void generate_chunk_name(struct flb_input_instance *in,
char *out_buf, int buf_size)
{
struct flb_time tm;
(void) in;
flb_time_get(&tm);
snprintf(out_buf, buf_size - 1,
"%i-%lu.%4lu.flb",
getpid(),
tm.tm.tv_sec, tm.tm.tv_nsec);
}
/* Returns the content size in bytes of the chunk's data. */
ssize_t flb_input_chunk_get_size(struct flb_input_chunk *ic)
{
    return cio_chunk_get_content_size(ic->chunk);
}
/*
 * When chunk is set to DOWN from memory, data_size is set to 0 and
 * cio_chunk_get_content_size(1) returns the data_size. fs_chunks_size
 * is used to track the size of chunks in filesystem so we need to call
 * cio_chunk_get_real_size to return the original size in the file system.
 *
 * Returns the total on-disk footprint of the chunk (content + metadata +
 * chunkio file overhead), or -1 when the chunk currently holds no data.
 */
static ssize_t flb_input_chunk_get_real_size(struct flb_input_chunk *ic)
{
    ssize_t meta_size;
    ssize_t size;

    size = cio_chunk_get_real_size(ic->chunk);
    if (size != 0) {
        return size;
    }

    /* Real size is not synced to chunk yet: estimate it from the content. */
    size = flb_input_chunk_get_size(ic);
    if (size == 0) {
        flb_debug("[input chunk] no data in the chunk %s",
                  flb_input_chunk_get_name(ic));
        return -1;
    }

    meta_size = cio_meta_size(ic->chunk);
    size += meta_size
        /* See https://github.com/edsiper/chunkio#file-layout for more details */
        + 2 /* HEADER BYTES */
        + 4 /* CRC32 */
        + 16 /* PADDING */
        + 2; /* METADATA LENGTH BYTES */

    return size;
}
/*
 * Append callback: writes 'len' bytes from 'buf' into the chunk. When metrics
 * support is compiled in and the write succeeds, the number of msgpack
 * records just added is recorded and accumulated into the chunk's total.
 */
int flb_input_chunk_write(void *data, const char *buf, size_t len)
{
    int ret;
    struct flb_input_chunk *ic;

    ic = (struct flb_input_chunk *) data;

    ret = cio_chunk_write(ic->chunk, buf, len);
#ifdef FLB_HAVE_METRICS
    if (ret == CIO_OK) {
        ic->added_records = flb_mp_count(buf, len);
        ic->total_records += ic->added_records;
    }
#endif

    return ret;
}
/* Writes 'len' bytes from 'buf' into the chunk at the given 'offset'. */
int flb_input_chunk_write_at(void *data, off_t offset,
                             const char *buf, size_t len)
{
    struct flb_input_chunk *ic = (struct flb_input_chunk *) data;

    return cio_chunk_write_at(ic->chunk, offset, buf, len);
}
/*
* For input_chunk referenced by an outgoing task, we need to check
* whether the chunk is in the middle of output flush callback
*/
static int flb_input_chunk_is_task_safe_delete(struct flb_task *task)
{
if (!task) {
return FLB_TRUE;
}
if (task->users != 0) {
return FLB_FALSE;
}
return FLB_TRUE;
}
/*
 * Decides whether 'old_ic' is a valid deletion candidate when making room
 * for 'ic' on output 'o_id'.
 */
static int flb_input_chunk_safe_delete(struct flb_input_chunk *ic,
                                       struct flb_input_chunk *old_ic,
                                       uint64_t o_id)
{
    /* Never drop the chunk we are currently trying to place. */
    if (old_ic == ic) {
        return FLB_FALSE;
    }

    /*
     * Even if chunks from same input plugin have same routes_mask when
     * created, the routes_mask could be modified when new chunks are
     * ingested, so re-check that the candidate is still routed to o_id.
     */
    if (flb_routes_mask_get_bit(old_ic->routes_mask, o_id) == 0) {
        return FLB_FALSE;
    }

    return FLB_TRUE;
}
/*
 * Tries to make room for 'new_input_chunk' in 'output_plugin' by releasing
 * space from three sources in order: chunks queued by the active storage
 * backlog input, segregated backlog chunks, and finally (only when
 * 'release_local_space' is set) chunks owned by the incoming chunk's own
 * input plugin.
 *
 * On return *local_release_requirement holds the byte count that still must
 * be released locally (0 when the requirement was fully satisfied).
 * Returns 0 on success or a negative error code.
 *
 * Fix: replaced a leftover debug printf("FAILED\n") on the local-release
 * error path with a proper flb_error log line.
 */
int flb_input_chunk_release_space_compound(
    struct flb_input_chunk *new_input_chunk,
    struct flb_output_instance *output_plugin,
    size_t *local_release_requirement,
    int release_local_space)
{
    ssize_t segregated_backlog_releasable_space;
    ssize_t active_backlog_releasable_space;
    ssize_t active_plugin_releasable_space;
    ssize_t required_space_remainder;
    struct flb_input_instance *storage_backlog_instance;
    int result;

    storage_backlog_instance = output_plugin->config->storage_input_plugin;

    *local_release_requirement = flb_input_chunk_get_real_size(new_input_chunk);
    required_space_remainder = (ssize_t) *local_release_requirement;

    segregated_backlog_releasable_space = 0;
    active_backlog_releasable_space = 0;
    active_plugin_releasable_space = 0;

    /* Phase 1: measure how much each source could release, in order. */
    active_backlog_releasable_space = flb_input_chunk_get_releasable_space(
                                                    new_input_chunk,
                                                    storage_backlog_instance,
                                                    output_plugin,
                                                    required_space_remainder);
    required_space_remainder -= active_backlog_releasable_space;

    if (required_space_remainder > 0) {
        segregated_backlog_releasable_space = sb_get_releasable_output_queue_space(
                                                    output_plugin,
                                                    required_space_remainder);
        required_space_remainder -= segregated_backlog_releasable_space;
    }

    if (required_space_remainder > 0) {
        active_plugin_releasable_space = flb_input_chunk_get_releasable_space(
                                                    new_input_chunk,
                                                    new_input_chunk->in,
                                                    output_plugin,
                                                    required_space_remainder);
        required_space_remainder -= active_plugin_releasable_space;
    }

    /* When we get here required_space_remainder could be negative but it's not a problem
     * this happens when the weight of the removed chunk is higher than the remainder of
     * the required space and it's not something that can nor should be prevented.
     */
    if (required_space_remainder > 0) {
        return -2;
    }

    /* Phase 2: actually release the space found above. */
    required_space_remainder = (ssize_t) *local_release_requirement;

    if (required_space_remainder > 0 && active_backlog_releasable_space > 0) {
        result = flb_input_chunk_release_space(new_input_chunk,
                                               storage_backlog_instance,
                                               output_plugin,
                                               active_backlog_releasable_space,
                                               FLB_INPUT_CHUNK_RELEASE_SCOPE_GLOBAL);
        if (result) {
            return -3;
        }

        required_space_remainder -= active_backlog_releasable_space;
    }

    if (required_space_remainder > 0 && segregated_backlog_releasable_space > 0) {
        result = sb_release_output_queue_space(
                                            output_plugin,
                                            segregated_backlog_releasable_space);
        if (result) {
            *local_release_requirement = (size_t) required_space_remainder;
            return -4;
        }

        required_space_remainder -= segregated_backlog_releasable_space;
    }

    if (release_local_space) {
        if (required_space_remainder > 0 && active_plugin_releasable_space > 0) {
            result = flb_input_chunk_release_space(new_input_chunk,
                                                   new_input_chunk->in,
                                                   output_plugin,
                                                   active_plugin_releasable_space,
                                                   FLB_INPUT_CHUNK_RELEASE_SCOPE_LOCAL);
            if (result) {
                flb_error("[input chunk] failed to release space in "
                          "input plugin %s", new_input_chunk->in->name);
                return -5;
            }

            required_space_remainder -= active_plugin_releasable_space;
        }
    }

    if (required_space_remainder < 0) {
        required_space_remainder = 0;
    }

    *local_release_requirement = (size_t) required_space_remainder;

    return 0;
}
/*
 * Returns how many chunks needs to be dropped in order to get enough space to
 * buffer the incoming data (with size chunk_size).
 * NOTE(review): "intput" in the function name is a typo, but the name is part
 * of the public interface and is kept for compatibility with callers.
 */
int flb_intput_chunk_count_dropped_chunks(struct flb_input_chunk *ic,
                                          struct flb_output_instance *o_ins,
                                          size_t chunk_size)
{
    int count = 0;
    int enough_space = FLB_FALSE;
    ssize_t bytes_remained;
    struct mk_list *head;
    struct flb_input_chunk *old_ic;

    /* Bytes still available under the output's total_limit_size budget. */
    bytes_remained = o_ins->total_limit_size -
                     o_ins->fs_chunks_size -
                     o_ins->fs_backlog_chunks_size;

    mk_list_foreach(head, &ic->in->chunks) {
        old_ic = mk_list_entry(head, struct flb_input_chunk, _head);

        /* Only count chunks that can actually be dropped right now. */
        if (flb_input_chunk_safe_delete(ic, old_ic, o_ins->id) == FLB_FALSE ||
            flb_input_chunk_is_task_safe_delete(old_ic->task) == FLB_FALSE) {
            continue;
        }

        bytes_remained += flb_input_chunk_get_real_size(old_ic);
        count++;
        if (bytes_remained >= (ssize_t) chunk_size) {
            enough_space = FLB_TRUE;
            break;
        }
    }

    /*
     * flb_intput_chunk_count_dropped_chunks(3) will only be called if the chunk will
     * be flushing to the output instance passed in and the instance will reach its
     * limit after appending the new data. This function will try to count how many
     * chunks need to be dropped in order to place the incoming chunk.
     *
     * Return '0' means that we cannot find a slot to ingest the incoming data.
     */
    if (enough_space == FLB_FALSE) {
        return 0;
    }

    return count;
}
/*
 * Find a slot in the output instances to append the new data with size
 * chunk_size; it will drop the oldest chunks when the limitation on local
 * disk is reached.
 *
 * Always returns 0; on failure to free enough space the routes_mask of the
 * incoming chunk is cleared for the affected output instead.
 */
int flb_input_chunk_find_space_new_data(struct flb_input_chunk *ic,
                                        size_t chunk_size, int overlimit)
{
    /*
     * 'count' must be initialized: it is read after the loop below, which may
     * never assign it when the outputs list is empty.
     */
    int count = 0;
    int result;
    ssize_t bytes;
    ssize_t old_ic_bytes;
    struct mk_list *tmp;
    struct mk_list *head;
    struct mk_list *head_chunk;
    struct flb_output_instance *o_ins;
    struct flb_input_chunk *old_ic;
    size_t local_release_requirement;

    /*
     * For each output instance that will be over the limit after adding the
     * new chunk, we have to determine how many chunks need to be removed. We
     * will adjust the routes_mask to only route to the output plugins that
     * have enough space after deleting some chunks from the queue.
     */
    mk_list_foreach(head, &ic->in->config->outputs) {
        count = 0;
        o_ins = mk_list_entry(head, struct flb_output_instance, _head);

        /* skip unlimited outputs, outputs not over the limit, or not routed */
        if ((o_ins->total_limit_size == -1) || ((1 << o_ins->id) & overlimit) == 0 ||
            (flb_routes_mask_get_bit(ic->routes_mask, o_ins->id) == 0)) {
            continue;
        }

        local_release_requirement = 0;

        /* first try to satisfy the requirement from the storage backlog */
        result = flb_input_chunk_release_space_compound(
                     ic, o_ins,
                     &local_release_requirement,
                     FLB_FALSE);
        if (!result && local_release_requirement == 0) {
            /*
             * If this function returned 0 it means the space requirement was
             * satisfied solely by releasing chunks in either storage_backlog
             * state (segregated or in queue).
             */
            continue;
        }

        /*
         * flb_input_chunk_release_space_compound() may fail to meet the space
         * requirements but it always sets local_release_requirement to the
         * right remaining amount.
         */
        count = flb_intput_chunk_count_dropped_chunks(ic, o_ins, local_release_requirement);
        if (count == 0) {
            /*
             * The worst scenario is that we cannot find a space by dropping
             * some old chunks for the incoming chunk. We need to adjust the
             * routes_mask of the incoming chunk to not flush to that output
             * instance.
             */
            flb_error("[input chunk] no enough space in filesystem to buffer "
                      "chunk %s in plugin %s", flb_input_chunk_get_name(ic), o_ins->name);

            flb_routes_mask_clear_bit(ic->routes_mask, o_ins->id);
            if (flb_routes_mask_is_empty(ic->routes_mask)) {
                bytes = flb_input_chunk_get_size(ic);
                if (bytes != 0) {
                    /*
                     * Skip newly created chunk as newly created chunk
                     * hasn't updated the fs_chunks_size yet.
                     */
                    bytes = flb_input_chunk_get_real_size(ic);
                    o_ins->fs_chunks_size -= bytes;
                    flb_debug("[input chunk] chunk %s has no output route, "
                              "remove %ld bytes from fs_chunks_size",
                              flb_input_chunk_get_name(ic), bytes);
                }
            }
            continue;
        }

        /*
         * Here we need to drop some chunks from the beginning of the chunks
         * list. Since chunks are stored in a doubly linked list (mk_list), we
         * are able to iterate the list from the beginning and check if the
         * current chunk is able to be removed.
         */
        mk_list_foreach_safe(head_chunk, tmp, &ic->in->chunks) {
            old_ic = mk_list_entry(head_chunk, struct flb_input_chunk, _head);

            if (flb_input_chunk_safe_delete(ic, old_ic, o_ins->id) == FLB_FALSE ||
                flb_input_chunk_is_task_safe_delete(old_ic->task) == FLB_FALSE) {
                continue;
            }

            old_ic_bytes = flb_input_chunk_get_real_size(old_ic);

            /* drop chunk by adjusting the routes_mask */
            flb_routes_mask_clear_bit(old_ic->routes_mask, o_ins->id);
            o_ins->fs_chunks_size -= old_ic_bytes;

            flb_debug("[input chunk] remove route of chunk %s with size %ld bytes to output plugin %s "
                      "to place the incoming data with size %ld bytes", flb_input_chunk_get_name(old_ic),
                      old_ic_bytes, o_ins->name, chunk_size);

            if (flb_routes_mask_is_empty(old_ic->routes_mask)) {
                if (old_ic->task != NULL) {
                    /*
                     * If the chunk is referenced by a task and the task has no
                     * active route, we need to destroy the task as well.
                     */
                    if (old_ic->task->users == 0) {
                        flb_debug("[task] drop task_id %d with no active route from input plugin %s",
                                  old_ic->task->id, ic->in->name);
                        flb_task_destroy(old_ic->task, FLB_TRUE);
                    }
                }
                else {
                    flb_debug("[input chunk] drop chunk %s with no output route from input plugin %s",
                              flb_input_chunk_get_name(old_ic), ic->in->name);
                    flb_input_chunk_destroy(old_ic, FLB_TRUE);
                }
            }

            count--;
            if (count == 0) {
                /* we have dropped enough chunks to place the incoming chunks */
                break;
            }
        }
    }

    if (count != 0) {
        flb_error("[input chunk] fail to drop enough chunks in order to place new data");
    }

    return 0;
}
/*
 * Returns a non-zero bitmask (one bit per output id) of the output instances
 * that would exceed their total_limit_size after buffering the new data.
 */
int flb_input_chunk_has_overlimit_routes(struct flb_input_chunk *ic,
                                         size_t chunk_size)
{
    int overlimit = 0;
    struct mk_list *head;
    struct flb_output_instance *o_ins;

    mk_list_foreach(head, &ic->in->config->outputs) {
        o_ins = mk_list_entry(head, struct flb_output_instance, _head);

        /* outputs without a limit are never overlimit */
        if (o_ins->total_limit_size == -1) {
            continue;
        }

        /* only outputs this chunk routes to are relevant */
        if (flb_routes_mask_get_bit(ic->routes_mask, o_ins->id) == 0) {
            continue;
        }

        flb_debug("[input chunk] chunk %s required %ld bytes and %ld bytes left "
                  "in plugin %s", flb_input_chunk_get_name(ic), chunk_size,
                  o_ins->total_limit_size -
                  o_ins->fs_backlog_chunks_size -
                  o_ins->fs_chunks_size,
                  o_ins->name);

        /* would active + backlog + incoming bytes exceed the limit? */
        if ((o_ins->fs_chunks_size +
             o_ins->fs_backlog_chunks_size +
             chunk_size) > o_ins->total_limit_size) {
            overlimit |= (1 << o_ins->id);
        }
    }

    return overlimit;
}
/*
 * Find a slot for the incoming data to buffer it in the local file system.
 * Returns 0 if none of the routes can be written to, non-zero otherwise.
 */
int flb_input_chunk_place_new_chunk(struct flb_input_chunk *ic, size_t chunk_size)
{
    int overlimit = flb_input_chunk_has_overlimit_routes(ic, chunk_size);

    /* make room (possibly dropping routes) only when some output is overlimit */
    if (overlimit) {
        flb_input_chunk_find_space_new_data(ic, chunk_size, overlimit);
    }

    return !flb_routes_mask_is_empty(ic->routes_mask);
}
/*
 * Create an input chunk context from a Chunk I/O chunk that already exists
 * (e.g. one recovered from the filesystem backlog). The content is validated
 * and truncated to the last valid record when possible, metrics are updated,
 * and the chunk is added to the instance chunk list and routed by its tag.
 *
 * Returns the new flb_input_chunk on success, or NULL on failure (corrupted
 * unrecoverable content, missing tag metadata or allocation error).
 */
struct flb_input_chunk *flb_input_chunk_map(struct flb_input_instance *in,
                                            void *chunk)
{
    int records = 0;
    int tag_len;
    int has_routes;
    int ret;
    uint64_t ts;
    char *buf_data;
    size_t buf_size;
    size_t offset;
    ssize_t bytes;
    const char *tag_buf;
    struct flb_input_chunk *ic;

    /* Create context for the input instance */
    ic = flb_calloc(1, sizeof(struct flb_input_chunk));
    if (!ic) {
        flb_errno();
        return NULL;
    }
    ic->busy = FLB_FALSE;
    /* mapped chunks come from the filesystem backlog */
    ic->fs_backlog = FLB_TRUE;
    ic->chunk = chunk;
    ic->in = in;
    msgpack_packer_init(&ic->mp_pck, ic, flb_input_chunk_write);

    ret = cio_chunk_get_content(ic->chunk, &buf_data, &buf_size);
    if (ret != CIO_OK) {
        flb_error("[input chunk] error retrieving content for metrics");
        flb_free(ic);
        return NULL;
    }

    /* Validate records in the chunk */
    ret = flb_mp_validate_chunk(buf_data, buf_size, &records, &offset);
    if (ret == -1) {
        /* If there are valid records, truncate the chunk size */
        if (records <= 0) {
            flb_plg_error(in,
                          "chunk validation failed, data might be corrupted. "
                          "No valid records found, the chunk will be discarded.");
            flb_free(ic);
            return NULL;
        }
        if (records > 0 && offset > 32) {
            flb_plg_warn(in,
                         "chunk validation failed, data might be corrupted. "
                         "Found %d valid records, failed content starts "
                         "right after byte %lu. Recovering valid records.",
                         records, offset);

            /* truncate the chunk to recover valid records */
            cio_chunk_write_at(chunk, offset, NULL, 0);
        }
        else {
            /* offset <= 32: too little valid content, recovery not attempted */
            flb_plg_error(in,
                          "chunk validation failed, data might be corrupted. "
                          "Found %d valid records, failed content starts "
                          "right after byte %lu. Cannot recover chunk,",
                          records, offset);
            flb_free(ic);
            return NULL;
        }
    }

    if (records == 0) {
        flb_plg_error(in,
                      "chunk validation failed, data might be corrupted. "
                      "No valid records found, the chunk will be discarded.");
        flb_free(ic);
        return NULL;
    }

    /*
     * If the content is valid and the chunk has extra padding zeros, just
     * perform an adjustment.
     */
    bytes = cio_chunk_get_content_size(chunk);
    if (bytes == -1) {
        flb_free(ic);
        return NULL;
    }
    if (offset < bytes) {
        cio_chunk_write_at(chunk, offset, NULL, 0);
    }

    /* Update metrics */
#ifdef FLB_HAVE_METRICS
    ic->total_records = records;
    if (ic->total_records > 0) {
        /* timestamp */
        ts = cmt_time_now();

        /* fluentbit_input_records_total */
        cmt_counter_add(in->cmt_records, ts, ic->total_records,
                        1, (char *[]) {(char *) flb_input_name(in)});

        /* fluentbit_input_bytes_total */
        cmt_counter_add(in->cmt_bytes, ts, buf_size,
                        1, (char *[]) {(char *) flb_input_name(in)});

        /* OLD metrics */
        flb_metrics_sum(FLB_METRIC_N_RECORDS, ic->total_records, in->metrics);
        flb_metrics_sum(FLB_METRIC_N_BYTES, buf_size, in->metrics);
    }
#endif

    /* Get the tag reference (chunk metadata) */
    ret = flb_input_chunk_get_tag(ic, &tag_buf, &tag_len);
    if (ret == -1) {
        flb_error("[input chunk] error retrieving tag of input chunk");
        flb_free(ic);
        return NULL;
    }

    bytes = flb_input_chunk_get_real_size(ic);
    if (bytes < 0) {
        flb_warn("[input chunk] could not retrieve chunk real size");
        flb_free(ic);
        return NULL;
    }

    /* route the chunk by its tag */
    has_routes = flb_routes_mask_set_by_tag(ic->routes_mask, tag_buf, tag_len, in);
    if (has_routes == 0) {
        flb_warn("[input chunk] no matching route for backoff log chunk %s",
                 flb_input_chunk_get_name(ic));
    }

    mk_list_add(&ic->_head, &in->chunks);

    /* account the restored chunk size on every routed output instance */
    flb_input_chunk_update_output_instances(ic, bytes);

    return ic;
}
/*
 * Create a brand new input chunk for the given tag: open a Chunk I/O file,
 * write the tag as chunk metadata, initialize the context and register the
 * chunk in the instance chunk list and per-event-type tag hash table.
 *
 * Returns the new chunk or NULL on failure.
 */
struct flb_input_chunk *flb_input_chunk_create(struct flb_input_instance *in,
                                               const char *tag, int tag_len)
{
    int ret;
    int err;
    int set_down = FLB_FALSE;
    int has_routes;
    char name[64];
    struct cio_chunk *chunk;
    struct flb_storage_input *storage;
    struct flb_input_chunk *ic;

    storage = in->storage;

    /* chunk name */
    generate_chunk_name(in, name, sizeof(name) - 1);

    /* open/create target chunk file */
    chunk = cio_chunk_open(storage->cio, storage->stream, name,
                           CIO_OPEN, FLB_INPUT_CHUNK_SIZE, &err);
    if (!chunk) {
        flb_error("[input chunk] could not create chunk file: %s:%s",
                  storage->stream->name, name);
        return NULL;
    }
    /*
     * If the returned chunk at open is 'down', just put it up, write the
     * content and set it down again.
     */
    ret = cio_chunk_is_up(chunk);
    if (ret == CIO_FALSE) {
        ret = cio_chunk_up_force(chunk);
        if (ret == -1) {
            cio_chunk_close(chunk, CIO_TRUE);
            return NULL;
        }
        set_down = FLB_TRUE;
    }

    /* write metadata (tag) */
    if (tag_len > 65535) {
        /* truncate length: metadata length is stored in 2 bytes */
        tag_len = 65535;
    }

    /* Write tag into metadata section */
    ret = cio_meta_write(chunk, (char *) tag, tag_len);
    if (ret == -1) {
        flb_error("[input chunk] could not write metadata");
        cio_chunk_close(chunk, CIO_TRUE);
        return NULL;
    }

    /* Create context for the input instance */
    ic = flb_calloc(1, sizeof(struct flb_input_chunk));
    if (!ic) {
        flb_errno();
        cio_chunk_close(chunk, CIO_TRUE);
        return NULL;
    }

    /*
     * Check chunk content type to be created: depending of the value set by
     * the input plugin, this can be FLB_INPUT_LOGS or FLB_INPUT_METRICS
     */
    ic->event_type = in->event_type;
    ic->busy = FLB_FALSE;
    ic->chunk = chunk;
    ic->fs_backlog = FLB_FALSE;
    ic->in = in;
    ic->stream_off = 0;
    ic->task = NULL;
#ifdef FLB_HAVE_METRICS
    ic->total_records = 0;
#endif

    /* Calculate the routes_mask for the input chunk */
    has_routes = flb_routes_mask_set_by_tag(ic->routes_mask, tag, tag_len, in);
    if (has_routes == 0) {
        flb_trace("[input chunk] no matching route for input chunk '%s' with tag '%s'",
                  flb_input_chunk_get_name(ic), tag);
    }

    msgpack_packer_init(&ic->mp_pck, ic, flb_input_chunk_write);
    mk_list_add(&ic->_head, &in->chunks);

    /* restore the chunk to its original 'down' state */
    if (set_down == FLB_TRUE) {
        cio_chunk_down(chunk);
    }

    /* index the chunk by tag in the per-event-type hash table */
    if (flb_input_event_type_is_log(in)) {
        flb_hash_add(in->ht_log_chunks, tag, tag_len, ic, 0);
    }
    else if (flb_input_event_type_is_metric(in)) {
        flb_hash_add(in->ht_metric_chunks, tag, tag_len, ic, 0);
    }

    return ic;
}
/*
 * Destroy an input chunk: discount its size from every routed, limited
 * output instance, remove it from the tag hash table (when 'del' is true),
 * close the underlying Chunk I/O file and release the context.
 *
 * 'del' is forwarded to cio_chunk_close() to also delete the file content.
 * Always returns 0.
 */
int flb_input_chunk_destroy(struct flb_input_chunk *ic, int del)
{
    int tag_len;
    int ret;
    ssize_t bytes;
    const char *tag_buf = NULL;
    struct mk_list *head;
    struct flb_output_instance *o_ins;

    /* the chunk must be up to read its size and tag below */
    if (flb_input_chunk_is_up(ic) == FLB_FALSE) {
        flb_input_chunk_set_up(ic);
    }

    /* discount the chunk size from each limited output it routes to */
    mk_list_foreach(head, &ic->in->config->outputs) {
        o_ins = mk_list_entry(head, struct flb_output_instance, _head);

        if (o_ins->total_limit_size == -1) {
            continue;
        }

        bytes = flb_input_chunk_get_real_size(ic);
        if (bytes == -1) {
            /* no data in the chunk */
            continue;
        }

        if (flb_routes_mask_get_bit(ic->routes_mask, o_ins->id) != 0) {
            o_ins->fs_chunks_size -= bytes;
            flb_debug("[input chunk] remove chunk %s with %ld bytes from plugin %s, "
                      "the updated fs_chunks_size is %ld bytes", flb_input_chunk_get_name(ic),
                      bytes, o_ins->name, o_ins->fs_chunks_size);
        }
    }

    /*
     * When a chunk is going to be destroyed, this can be in a down state,
     * since the next step is to retrieve the Tag we need to have the
     * content up.
     */
    ret = flb_input_chunk_is_up(ic);
    if (ret == FLB_FALSE) {
        ret = cio_chunk_up_force(ic->chunk);
        if (ret == -1) {
            flb_error("[input chunk] cannot load chunk: %s",
                      flb_input_chunk_get_name(ic));
        }
    }

    /* Retrieve Tag */
    ret = flb_input_chunk_get_tag(ic, &tag_buf, &tag_len);
    if (ret == -1) {
        flb_trace("[input chunk] could not retrieve chunk tag: %s",
                  flb_input_chunk_get_name(ic));
    }

    if (del == CIO_TRUE && tag_buf) {
        /*
         * "TRY" to delete any reference to this chunk ('ic') from the hash
         * table. Note that maybe the value is not longer available in the
         * entries if it was replaced: note that we always keep the last
         * chunk for a specific Tag.
         */
        if (ic->event_type == FLB_INPUT_LOGS) {
            flb_hash_del_ptr(ic->in->ht_log_chunks,
                             tag_buf, tag_len, (void *) ic);
        }
        else if (ic->event_type == FLB_INPUT_METRICS) {
            flb_hash_del_ptr(ic->in->ht_metric_chunks,
                             tag_buf, tag_len, (void *) ic);
        }
    }

    cio_chunk_close(ic->chunk, del);
    mk_list_del(&ic->_head);
    flb_free(ic);

    return 0;
}
/*
 * Return or create an available chunk to write data.
 *
 * Looks up the last chunk indexed for 'tag' (per event type); if it is busy,
 * locked or cannot be brought up, a new chunk is created instead. '*set_down'
 * is set to FLB_TRUE when an existing 'down' chunk had to be brought up, so
 * the caller can put it down again afterwards. Returns NULL when no chunk can
 * accept 'chunk_size' bytes.
 */
static struct flb_input_chunk *input_chunk_get(struct flb_input_instance *in,
                                               const char *tag, int tag_len,
                                               size_t chunk_size, int *set_down)
{
    int id = -1;
    int ret;
    int new_chunk = FLB_FALSE;
    size_t out_size;
    struct flb_input_chunk *ic = NULL;

    /* look up the last chunk indexed for this tag in the per-type table */
    if (in->event_type == FLB_INPUT_LOGS) {
        id = flb_hash_get(in->ht_log_chunks, tag, tag_len,
                          (void *) &ic, &out_size);
    }
    else if (in->event_type == FLB_INPUT_METRICS) {
        id = flb_hash_get(in->ht_metric_chunks, tag, tag_len,
                          (void *) &ic, &out_size);
    }

    if (id >= 0) {
        if (ic->busy == FLB_TRUE || cio_chunk_is_locked(ic->chunk)) {
            /* chunk is referenced by a task or locked: cannot be reused */
            ic = NULL;
        }
        else if (cio_chunk_is_up(ic->chunk) == CIO_FALSE) {
            ret = cio_chunk_up_force(ic->chunk);
            if (ret == -1) {
                ic = NULL;
            }
            /* NOTE(review): *set_down is flagged even when up_force failed
             * above — confirm callers tolerate that */
            *set_down = FLB_TRUE;
        }
    }

    /* No chunk was found, we need to create a new one */
    if (!ic) {
        ic = flb_input_chunk_create(in, (char *) tag, tag_len);
        new_chunk = FLB_TRUE;
        if (!ic) {
            return NULL;
        }
    }

    /*
     * If buffering this block of data will exceed one of the limit among all output instances
     * that the chunk will flush to, we need to modify the routes_mask of the oldest chunks
     * (based in creation time) to get enough space for the incoming chunk.
     */
    if (!flb_routes_mask_is_empty(ic->routes_mask)
        && flb_input_chunk_place_new_chunk(ic, chunk_size) == 0) {
        /*
         * If the chunk is not newly created, the chunk might already have logs inside.
         * We cannot delete (reused) chunks here.
         * If the routes_mask is cleared after trying to append new data, we destroy
         * the chunk.
         */
        if (new_chunk || flb_routes_mask_is_empty(ic->routes_mask) == FLB_TRUE) {
            flb_input_chunk_destroy(ic, FLB_TRUE);
        }
        return NULL;
    }

    return ic;
}
/*
 * Returns FLB_TRUE when the instance memory usage has reached its configured
 * mem_buf_limit; a limit of zero or less disables the check.
 */
static inline int flb_input_chunk_is_mem_overlimit(struct flb_input_instance *i)
{
    if (i->mem_buf_limit <= 0) {
        /* no limit configured */
        return FLB_FALSE;
    }

    return (i->mem_chunks_size >= i->mem_buf_limit) ? FLB_TRUE : FLB_FALSE;
}
/*
 * Returns FLB_TRUE when the instance uses filesystem storage, is configured
 * to pause on chunk overlimit, and the total chunk count has reached the
 * maximum number of chunks allowed 'up'.
 */
static inline int flb_input_chunk_is_storage_overlimit(struct flb_input_instance *i)
{
    struct flb_storage_input *storage = (struct flb_storage_input *) i->storage;

    if (storage->type != CIO_STORE_FS) {
        /* only filesystem storage has a chunk count limit */
        return FLB_FALSE;
    }

    if (i->storage_pause_on_chunks_overlimit != FLB_TRUE) {
        return FLB_FALSE;
    }

    if (storage->cio->total_chunks >= storage->cio->max_chunks_up) {
        return FLB_TRUE;
    }

    return FLB_FALSE;
}
/*
 * Check all chunks associated to the input instance and summarize
 * the number of bytes in use (only chunks currently 'up' count).
 */
size_t flb_input_chunk_total_size(struct flb_input_instance *in)
{
    struct flb_storage_input *storage;

    storage = (struct flb_storage_input *) in->storage;
    return cio_stream_size_chunks_up(storage->stream);
}
/*
 * Count and update the number of bytes being used by the instance. Also
 * check if the instance is paused, and if so, resume it when it is no
 * longer over the limits.
 *
 * It always returns the number of bytes in use.
 */
size_t flb_input_chunk_set_limits(struct flb_input_instance *in)
{
    size_t total;
    int active;

    /* Gather total number of enqueued bytes */
    total = flb_input_chunk_total_size(in);

    /* Register the total into the context variable */
    in->mem_chunks_size = total;

    /* an instance may only be resumed while the engine keeps ingesting */
    active = (in->config->is_running == FLB_TRUE &&
              in->config->is_ingestion_active == FLB_TRUE);

    /* resume from a memory-buffer pause once usage drops below the limit */
    if (active &&
        in->mem_buf_status == FLB_INPUT_PAUSED &&
        flb_input_chunk_is_mem_overlimit(in) == FLB_FALSE) {
        in->mem_buf_status = FLB_INPUT_RUNNING;
        if (in->p->cb_resume) {
            in->p->cb_resume(in->context, in->config);
            flb_info("[input] %s resume (mem buf overlimit)",
                     in->name);
        }
    }

    /* resume from a storage pause once the chunk count drops below the limit */
    if (active &&
        in->storage_buf_status == FLB_INPUT_PAUSED &&
        flb_input_chunk_is_storage_overlimit(in) == FLB_FALSE) {
        in->storage_buf_status = FLB_INPUT_RUNNING;
        if (in->p->cb_resume) {
            in->p->cb_resume(in->context, in->config);
            flb_info("[input] %s resume (storage buf overlimit %d/%d)",
                     in->name,
                     ((struct flb_storage_input *)in->storage)->cio->total_chunks,
                     ((struct flb_storage_input *)in->storage)->cio->max_chunks_up);
        }
    }

    return total;
}
/*
* If the number of bytes in use by the chunks are over the imposed limit
* by configuration, pause the instance.
*/
static inline int flb_input_chunk_protect(struct flb_input_instance *i)
{
if (flb_input_chunk_is_mem_overlimit(i) == FLB_TRUE) {
flb_warn("[input] %s paused (mem buf overlimit)",
i->name);
if (!flb_input_buf_paused(i)) {
if (i->p->cb_pause) {
i->p->cb_pause(i->context, i->config);
}
}
i->mem_buf_status = FLB_INPUT_PAUSED;
return FLB_TRUE;
}
if (flb_input_chunk_is_storage_overlimit(i) == FLB_TRUE) {
flb_warn("[input] %s paused (storage buf overlimit %d/%d)",
i->name,
((struct flb_storage_input *)i->storage)->cio->total_chunks,
((struct flb_storage_input *)i->storage)->cio->max_chunks_up);
if (!flb_input_buf_paused(i)) {
if (i->p->cb_pause) {
i->p->cb_pause(i->context, i->config);
}
}
i->storage_buf_status = FLB_INPUT_PAUSED;
return FLB_TRUE;
}
return FLB_FALSE;
}
/*
 * Validate if the chunk coming from the input plugin based on config and
 * resources usage must be 'up' or 'down' (applicable for filesystem storage
 * type). Puts the chunk down when the instance is over its memory limit.
 *
 * Returns FLB_FALSE when the chunk was put down, FLB_TRUE otherwise.
 *
 * FIXME: can we find a better name for this function ?
 */
int flb_input_chunk_set_up_down(struct flb_input_chunk *ic)
{
    struct flb_input_instance *in = ic->in;

    /* refresh the total of enqueued bytes on the instance */
    in->mem_chunks_size = flb_input_chunk_total_size(in);

    if (flb_input_chunk_is_mem_overlimit(in) == FLB_TRUE &&
        cio_chunk_is_up(ic->chunk) == CIO_TRUE) {
        cio_chunk_down(ic->chunk);

        /* Adjust new counters after releasing the chunk memory */
        in->mem_chunks_size = flb_input_chunk_total_size(ic->in);

        return FLB_FALSE;
    }

    return FLB_TRUE;
}
/* Returns CIO_TRUE when the chunk content is mapped in memory ('up') */
int flb_input_chunk_is_up(struct flb_input_chunk *ic)
{
    return cio_chunk_is_up(ic->chunk);
}
/* Put the chunk 'down' (unmap its content); no-op if it is already down */
int flb_input_chunk_down(struct flb_input_chunk *ic)
{
    if (cio_chunk_is_up(ic->chunk) != CIO_TRUE) {
        return 0;
    }

    return cio_chunk_down(ic->chunk);
}
/* Bring the chunk 'up' (map its content); no-op if it is already up */
int flb_input_chunk_set_up(struct flb_input_chunk *ic)
{
    if (cio_chunk_is_up(ic->chunk) != CIO_FALSE) {
        return 0;
    }

    return cio_chunk_up(ic->chunk);
}
/*
 * Append a RAW MessagePack buffer to the input instance.
 *
 * Selects (or creates) a chunk for 'tag', writes the data, runs filters,
 * updates metrics and output size accounting, optionally feeds the stream
 * processor, and finally re-applies memory/storage limits (possibly pausing
 * the instance). Returns 0 on success, -1 on failure.
 */
int flb_input_chunk_append_raw(struct flb_input_instance *in,
                               const char *tag, size_t tag_len,
                               const void *buf, size_t buf_size)
{
    int ret;
    int set_down = FLB_FALSE;
    int min;
    int meta_size;
    int new_chunk = FLB_FALSE;
    uint64_t ts;
    size_t diff;
    size_t size;
    size_t pre_size;
    struct flb_input_chunk *ic;
    struct flb_storage_input *si;

    /* Check if the input plugin has been paused */
    if (flb_input_buf_paused(in) == FLB_TRUE) {
        flb_debug("[input chunk] %s is paused, cannot append records",
                  in->name);
        return -1;
    }

    if (buf_size == 0) {
        flb_debug("[input chunk] skip ingesting data with 0 bytes");
        return -1;
    }

    /*
     * Some callers might not set a custom tag, on that case just inherit
     * the fixed instance tag or instance name.
     */
    if (!tag) {
        if (in->tag && in->tag_len > 0) {
            tag = in->tag;
            tag_len = in->tag_len;
        }
        else {
            tag = in->name;
            tag_len = strlen(in->name);
        }
    }

    /*
     * Get a target input chunk, can be one with remaining space available
     * or a new one.
     */
    ic = input_chunk_get(in, tag, tag_len, buf_size, &set_down);
    if (!ic) {
        flb_error("[input chunk] no available chunk");
        return -1;
    }

    /* newly created chunk */
    if (flb_input_chunk_get_size(ic) == 0) {
        new_chunk = FLB_TRUE;
    }

    /* We got the chunk, validate if is 'up' or 'down' */
    ret = flb_input_chunk_is_up(ic);
    if (ret == FLB_FALSE) {
        ret = cio_chunk_up_force(ic->chunk);
        if (ret == -1) {
            flb_error("[input chunk] cannot retrieve temporary chunk");
            return -1;
        }
        set_down = FLB_TRUE;
    }

    /*
     * Previous size from the chunk, used to calculate the difference
     * after filtering
     */
    pre_size = cio_chunk_get_content_size(ic->chunk);

    /* Write the new data */
    ret = flb_input_chunk_write(ic, buf, buf_size);
    if (ret == -1) {
        flb_error("[input chunk] error writing data from %s instance",
                  in->name);
        cio_chunk_tx_rollback(ic->chunk);
        return -1;
    }

    /* Update 'input' metrics */
#ifdef FLB_HAVE_METRICS
    if (ic->total_records > 0) {
        /* timestamp */
        ts = cmt_time_now();

        /* fluentbit_input_records_total */
        cmt_counter_add(in->cmt_records, ts, ic->added_records,
                        1, (char *[]) {(char *) flb_input_name(in)});

        /* fluentbit_input_bytes_total */
        cmt_counter_add(in->cmt_bytes, ts, buf_size,
                        1, (char *[]) {(char *) flb_input_name(in)});

        /* OLD api */
        flb_metrics_sum(FLB_METRIC_N_RECORDS, ic->added_records, in->metrics);
        flb_metrics_sum(FLB_METRIC_N_BYTES, buf_size, in->metrics);
    }
#endif

    /* Apply filters (log events only) */
    if (in->event_type == FLB_INPUT_LOGS) {
        flb_filter_do(ic,
                      buf, buf_size,
                      tag, tag_len, in->config);
    }

    /* Get chunk size */
    size = cio_chunk_get_content_size(ic->chunk);

    /* calculate the 'real' new bytes being added after the filtering phase */
    diff = llabs(size - pre_size);

    /*
     * Update output instance bytes counters, note that bytes counter should
     * always count the chunk size in the file system. Therefore, it should
     * add the extra bytes for the metadata.
     */
    meta_size = cio_meta_size(ic->chunk);
    if (new_chunk == FLB_TRUE) {
        diff += meta_size
            /* See https://github.com/edsiper/chunkio#file-layout for more details */
            + 2    /* HEADER BYTES */
            + 4    /* CRC32 */
            + 16   /* PADDING */
            + 2;   /* METADATA LENGTH BYTES */
    }

    /*
     * There is a case that rewrite_tag will modify the tag and keep rule is set
     * to drop the original record. The original record will still go through the
     * flb_input_chunk_update_output_instances(2) to update the fs_chunks_size by
     * metadata bytes (consisted by metadata bytes of the file chunk). This condition
     * sets the diff to 0 in order to not update the fs_chunks_size.
     */
    if (flb_input_chunk_get_size(ic) == 0) {
        diff = 0;
    }

    if (diff != 0) {
        flb_input_chunk_update_output_instances(ic, diff);
    }

    /* Lock buffers where size > 2MB */
    if (size > FLB_INPUT_CHUNK_FS_MAX_SIZE) {
        cio_chunk_lock(ic->chunk);
    }

    /* If filtering dropped everything, the chunk is empty: discard it */
    if (size == 0) {
        flb_input_chunk_destroy(ic, FLB_TRUE);
        flb_input_chunk_set_limits(in);
        return 0;
    }
#ifdef FLB_HAVE_STREAM_PROCESSOR
    else if (in->config->stream_processor_ctx &&
             ic->event_type == FLB_INPUT_LOGS) {
        char *c_data;
        size_t c_size;

        /* Retrieve chunk (filtered) output content */
        cio_chunk_get_content(ic->chunk, &c_data, &c_size);

        /* Invoke stream processor on the newly appended region only */
        flb_sp_do(in->config->stream_processor_ctx,
                  in,
                  tag, tag_len,
                  c_data + ic->stream_off, c_size - ic->stream_off);
        ic->stream_off += (c_size - ic->stream_off);
    }
#endif

    if (set_down == FLB_TRUE) {
        cio_chunk_down(ic->chunk);
    }

    /*
     * If the instance is not routable, there is no need to keep the
     * content in the storage engine, just get rid of it.
     */
    if (in->routable == FLB_FALSE) {
        flb_input_chunk_destroy(ic, FLB_TRUE);
        return 0;
    }

    /* Update memory counters and adjust limits if any */
    flb_input_chunk_set_limits(in);

    /*
     * Check if we are overlimit and validate if there is any filesystem
     * storage type associated to this input instance, if so, unload the
     * chunk content from memory to respect imposed limits.
     *
     * Calling cio_chunk_down() the memory map associated and the file
     * descriptor will be released. At any later time, it must be brought up
     * for I/O operations.
     */
    si = (struct flb_storage_input *) in->storage;
    if (flb_input_chunk_is_mem_overlimit(in) == FLB_TRUE &&
        si->type == CIO_STORE_FS) {
        if (cio_chunk_is_up(ic->chunk) == CIO_TRUE) {
            /*
             * If we are already over limit, a sub-sequent data ingestion
             * might need a Chunk to write data in. As an optimization we
             * will put this Chunk down ONLY IF it has less than 1% of
             * its capacity as available space, otherwise keep it 'up' so
             * its available space can be used.
             */
            size = cio_chunk_get_content_size(ic->chunk);

            /* Do we have less than 1% available ? */
            min = (FLB_INPUT_CHUNK_FS_MAX_SIZE * 0.01);
            if (FLB_INPUT_CHUNK_FS_MAX_SIZE - size < min) {
                cio_chunk_down(ic->chunk);
            }
        }
    }

    flb_input_chunk_protect(in);
    return 0;
}
/*
 * Retrieve a raw buffer from a dyntag node. Brings the chunk up when needed,
 * marks it busy and locks it, since the returned pointer is likely a
 * reference for an outgoing task. Returns NULL on failure or empty content.
 */
const void *flb_input_chunk_flush(struct flb_input_chunk *ic, size_t *size)
{
    int ret;
    char *data = NULL;

    /* the chunk content must be mapped before it can be referenced */
    if (cio_chunk_is_up(ic->chunk) == CIO_FALSE) {
        if (cio_chunk_up(ic->chunk) == -1) {
            return NULL;
        }
    }

    /*
     * msgpack-c internally uses a raw buffer for its operations; since we
     * already appended data we can just take out the references to avoid
     * a new memory allocation and skip a copy operation.
     */
    ret = cio_chunk_get_content(ic->chunk, &data, size);
    if (ret == -1) {
        flb_error("[input chunk] error retrieving chunk content");
        return NULL;
    }

    if (data == NULL) {
        *size = 0;
        return NULL;
    }

    /* Set it busy as it is likely a reference for an outgoing task */
    ic->busy = FLB_TRUE;

    /* Lock the internal chunk */
    cio_chunk_lock(ic->chunk);

    return data;
}
/*
 * Release the 'busy' flag set by flb_input_chunk_flush(). Returns -1 when
 * the chunk was not busy, 0 on success.
 */
int flb_input_chunk_release_lock(struct flb_input_chunk *ic)
{
    if (ic->busy == FLB_FALSE) {
        return -1;
    }

    ic->busy = FLB_FALSE;
    return 0;
}
/* Return the Chunk I/O file name associated to this input chunk */
flb_sds_t flb_input_chunk_get_name(struct flb_input_chunk *ic)
{
    struct cio_chunk *chunk = (struct cio_chunk *) ic->chunk;

    return chunk->name;
}
/*
 * Retrieve the chunk tag from its metadata section. On success '*tag_buf'
 * and '*tag_len' reference the metadata; on failure they are set to NULL
 * and -1 and the function returns -1.
 */
int flb_input_chunk_get_tag(struct flb_input_chunk *ic,
                            const char **tag_buf, int *tag_len)
{
    int ret;
    int meta_len;
    char *meta_buf;

    ret = cio_meta_read(ic->chunk, &meta_buf, &meta_len);
    if (ret == -1) {
        *tag_buf = NULL;
        *tag_len = -1;
        return -1;
    }

    *tag_buf = meta_buf;
    *tag_len = meta_len;
    return ret;
}
/*
 * Iterates all output instances that the chunk will be flushing to and
 * accounts the new bytes (chunk_size) into each instance fs_chunks_size.
 */
void flb_input_chunk_update_output_instances(struct flb_input_chunk *ic,
                                             size_t chunk_size)
{
    struct mk_list *head;
    struct flb_output_instance *o_ins;

    /* for each output plugin, we update the fs_chunks_size */
    mk_list_foreach(head, &ic->in->config->outputs) {
        o_ins = mk_list_entry(head, struct flb_output_instance, _head);

        /* unlimited outputs do not track usage */
        if (o_ins->total_limit_size == -1) {
            continue;
        }

        /* only outputs this chunk routes to are accounted */
        if (flb_routes_mask_get_bit(ic->routes_mask, o_ins->id) == 0) {
            continue;
        }

        o_ins->fs_chunks_size += chunk_size;
        flb_debug("[input chunk] chunk %s update plugin %s fs_chunks_size by %ld bytes, "
                  "the current fs_chunks_size is %ld bytes", flb_input_chunk_get_name(ic),
                  o_ins->name, chunk_size, o_ins->fs_chunks_size);
    }
}
| 1 | 15,802 | Let's get rid of these for the final version and if you get rid of the macro I left last time I'd be more than grateful. | fluent-fluent-bit | c |
@@ -36,6 +36,8 @@ public class MetadataColumns {
Integer.MAX_VALUE - 1, "_file", Types.StringType.get(), "Path of the file in which a row is stored");
public static final NestedField ROW_POSITION = NestedField.required(
Integer.MAX_VALUE - 2, "_pos", Types.LongType.get(), "Ordinal position of a row in the source data file");
+ public static final NestedField DELETE_MARK = NestedField.required(
+ Integer.MAX_VALUE - 3, "_deleted", Types.BooleanType.get(), "Delete mark denotes whether row is deleted or not");
// IDs Integer.MAX_VALUE - (101-200) are used for reserved columns
public static final NestedField DELETE_FILE_PATH = NestedField.required( | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.Map;
import java.util.Set;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.types.Types.NestedField;
public class MetadataColumns {
private MetadataColumns() {
}
// IDs Integer.MAX_VALUE - (1-100) are used for metadata columns
public static final NestedField FILE_PATH = NestedField.required(
Integer.MAX_VALUE - 1, "_file", Types.StringType.get(), "Path of the file in which a row is stored");
public static final NestedField ROW_POSITION = NestedField.required(
Integer.MAX_VALUE - 2, "_pos", Types.LongType.get(), "Ordinal position of a row in the source data file");
// IDs Integer.MAX_VALUE - (101-200) are used for reserved columns
public static final NestedField DELETE_FILE_PATH = NestedField.required(
Integer.MAX_VALUE - 101, "file_path", Types.StringType.get(), "Path of a file in which a deleted row is stored");
public static final NestedField DELETE_FILE_POS = NestedField.required(
Integer.MAX_VALUE - 102, "pos", Types.LongType.get(), "Ordinal position of a deleted row in the data file");
public static final int DELETE_FILE_ROW_FIELD_ID = Integer.MAX_VALUE - 103;
public static final String DELETE_FILE_ROW_DOC = "Deleted row values";
private static final Map<String, NestedField> META_COLUMNS = ImmutableMap.of(
FILE_PATH.name(), FILE_PATH,
ROW_POSITION.name(), ROW_POSITION);
private static final Set<Integer> META_IDS = META_COLUMNS.values().stream().map(NestedField::fieldId)
.collect(ImmutableSet.toImmutableSet());
public static Set<Integer> metadataFieldIds() {
return META_IDS;
}
public static NestedField get(String name) {
return META_COLUMNS.get(name);
}
public static boolean isMetadataColumn(String name) {
return META_COLUMNS.containsKey(name);
}
public static boolean nonMetadataColumn(String name) {
return !META_COLUMNS.containsKey(name);
}
}
| 1 | 36,760 | Instead of `DELETE_MARK`, how about `IS_DELETED`? I don't think that "mark" is clear enough to describe what this is. Similarly, I think the docs should be "Whether the row has been deleted". There's no need to include "delete mark" because that's identifying something that is not defined (this column is _deleted and "mark" is not introduced), and "or not" is unnecessary because it is implied by "whether". | apache-iceberg | java |
@@ -179,6 +179,19 @@ def init_worker(checked_num, action_num):
progress_actions = action_num
+def get_dependent_headers(buildaction, archive):
+ LOG.debug("Generating dependent headers via compiler...")
+ try:
+ dependencies = set(create_dependencies(buildaction))
+ except Exception as ex:
+ LOG.debug("Couldn't create dependencies:")
+ LOG.debug(str(ex))
+ # TODO append with buildaction
+ archive.writestr("no-sources", str(ex))
+ dependencies = set()
+ return dependencies
+
+
def check(check_data):
"""
Invoke clang with an action which called by processes. | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
"""
from collections import defaultdict
import codecs
import glob
import multiprocessing
import os
import signal
import sys
import traceback
import zipfile
from libcodechecker import util
from libcodechecker.analyze import analyzer_env
from libcodechecker.analyze import plist_parser
from libcodechecker.analyze.analyzers import analyzer_types
from libcodechecker.logger import LoggerFactory
LOG = LoggerFactory.get_new_logger('ANALYSIS MANAGER')
def worker_result_handler(results, metadata, output_path):
"""
Print the analysis summary.
"""
if metadata is None:
metadata = {}
successful_analysis = defaultdict(int)
failed_analysis = defaultdict(int)
skipped_num = 0
reanalyzed_num = 0
for res, skipped, reanalyzed, analyzer_type, _ in results:
if skipped:
skipped_num += 1
else:
if reanalyzed:
reanalyzed_num += 1
if res == 0:
successful_analysis[analyzer_type] += 1
else:
failed_analysis[analyzer_type] += 1
LOG.info("----==== Summary ====----")
LOG.info("Total compilation commands: " + str(len(results)))
if successful_analysis:
LOG.info("Successfully analyzed")
for analyzer_type, res in successful_analysis.items():
LOG.info(' ' + analyzer_type + ': ' + str(res))
if failed_analysis:
LOG.info("Failed to analyze")
for analyzer_type, res in failed_analysis.items():
LOG.info(' ' + analyzer_type + ': ' + str(res))
if reanalyzed_num:
LOG.info("Reanalyzed compilation commands: " + str(reanalyzed_num))
if skipped_num:
LOG.info("Skipped compilation commands: " + str(skipped_num))
LOG.info("----=================----")
metadata['successful'] = successful_analysis
metadata['failed'] = failed_analysis
metadata['skipped'] = skipped_num
# check() created the result .plist files and additional, per-analysis
# meta information in forms of .plist.source files.
# We now soak these files into the metadata dict, as they are not needed
# as loose files on the disk... but synchronizing LARGE dicts between
# threads would be more error prone.
source_map = {}
for f in glob.glob(os.path.join(output_path, "*.source")):
with open(f, 'r') as sfile:
source_map[f[:-7]] = sfile.read().strip()
os.remove(f)
for f in glob.glob(os.path.join(output_path, 'failed', "*.error")):
err_file, _ = os.path.splitext(f)
plist_file = os.path.basename(err_file) + ".plist"
plist_file = os.path.join(output_path, plist_file)
metadata['result_source_files'].pop(plist_file, None)
metadata['result_source_files'].update(source_map)
def create_dependencies(action):
"""
Transforms the given original build 'command' to a command that, when
executed, is able to generate a dependency list.
"""
def __eliminate_argument(arg_vect, opt_strings, num_args=0):
"""
This call eliminates the parameters matching the given option strings,
along with the number of arguments coming directly after the opt-string
from the command.
"""
option_index = next(
(i for i, c in enumerate(arg_vect) if c in opt_strings), None)
if option_index:
arg_vect = arg_vect[0:option_index] + \
arg_vect[option_index + num_args + 1:]
return arg_vect
if 'CC_LOGGER_GCC_LIKE' not in os.environ:
os.environ['CC_LOGGER_GCC_LIKE'] = 'gcc:g++:clang:clang++:cc:c++'
command = action.original_command.split(' ')
if any(binary_substring in command[0] for binary_substring
in os.environ['CC_LOGGER_GCC_LIKE'].split(':')):
# gcc and clang can generate makefile-style dependency list.
# If an output file is set, the dependency is not written to the
# standard output but rather into the given file.
# We need to first eliminate the output from the command.
command = __eliminate_argument(command, ['-o', '--output'], 1)
# Remove potential dependency-file-generator options from the string
# too. These arguments found in the logged build command would derail
# us and generate dependencies, e.g. into the build directory used.
command = __eliminate_argument(command, ['-MM'])
command = __eliminate_argument(command, ['-MF'], 1)
command = __eliminate_argument(command, ['-MP'])
command = __eliminate_argument(command, ['-MT'], 1)
command = __eliminate_argument(command, ['-MQ'], 1)
command = __eliminate_argument(command, ['-MD'])
command = __eliminate_argument(command, ['-MMD'])
# Clang contains some extra options.
command = __eliminate_argument(command, ['-MJ'], 1)
command = __eliminate_argument(command, ['-MV'])
# Build out custom invocation for dependency generation.
command = [command[0], '-E', '-M', '-MT', '__dummy'] + command[1:]
LOG.debug("Crafting build dependencies from GCC or Clang!")
output, rc = util.call_command(command,
env=os.environ,
cwd=action.directory)
output = codecs.decode(output, 'utf-8', 'replace')
if rc == 0:
# Parse 'Makefile' syntax dependency output.
dependencies = output.replace('__dummy: ', '') \
.replace('\\', '') \
.replace(' ', '') \
.replace(' ', '\n')
# The dependency list already contains the source file's path.
return [dep for dep in dependencies.split('\n') if dep != ""]
else:
raise IOError("Failed to generate dependency list for " +
' '.join(command) + "\n\nThe original output was: " +
output)
else:
raise ValueError("Cannot generate dependency list for build "
"command '" + ' '.join(command) + "'")
# Progress reporting.
progress_checked_num = None
progress_actions = None
def init_worker(checked_num, action_num):
global progress_checked_num, progress_actions
progress_checked_num = checked_num
progress_actions = action_num
def check(check_data):
"""
Invoke clang with an action which called by processes.
Different analyzer object belongs to for each build action.
skiplist handler is None if no skip file was configured.
"""
action, context, analyzer_config_map, \
output_dir, skip_handler, quiet_output_on_stdout, \
capture_analysis_output = check_data
skipped = False
reanalyzed = False
try:
# If one analysis fails the check fails.
return_codes = 0
skipped = False
result_file = ''
for source in action.sources:
# If there is no skiplist handler there was no skip list file
# in the command line.
# C++ file skipping is handled here.
source_file_name = os.path.basename(source)
if skip_handler and skip_handler.should_skip(source):
LOG.debug_analyzer(source_file_name + ' is skipped')
skipped = True
continue
# Escape the spaces in the source path, but make sure not to
# over-escape already escaped spaces.
if ' ' in source:
space_locations = [i for i, c in enumerate(source) if c == ' ']
# If a \ is added to the text, the following indexes must be
# shifted by one.
rolling_offset = 0
for orig_idx in space_locations:
idx = orig_idx + rolling_offset
if idx != 0 and source[idx - 1] != '\\':
source = source[:idx] + '\ ' + source[idx + 1:]
rolling_offset += 1
# Construct analyzer env.
analyzer_environment = analyzer_env.get_check_env(
context.path_env_extra,
context.ld_lib_path_extra)
# Create a source analyzer.
source_analyzer = \
analyzer_types.construct_analyzer(action,
analyzer_config_map)
# Source is the currently analyzed source file
# there can be more in one buildaction.
source_analyzer.source_file = source
# The result handler for analysis is an empty result handler
# which only returns metadata, but can't process the results.
rh = analyzer_types.construct_analyze_handler(action,
output_dir,
context.severity_map,
skip_handler)
rh.analyzed_source_file = source
if os.path.exists(rh.analyzer_result_file):
reanalyzed = True
# Fills up the result handler with the analyzer information.
source_analyzer.analyze(rh, analyzer_environment)
# If source file contains escaped spaces ("\ " tokens), then
# clangSA writes the plist file with removing this escape
# sequence, whereas clang-tidy does not. We rewrite the file
# names to contain no escape sequences for every analyzer.
result_file = rh.analyzer_result_file.replace(r'\ ', ' ')
result_base = os.path.basename(result_file)
failed_dir = os.path.join(output_dir, "failed")
if rh.analyzer_returncode == 0:
# Analysis was successful processing results.
if capture_analysis_output:
success_dir = os.path.join(output_dir, "success")
if not os.path.exists(success_dir):
os.makedirs(success_dir)
if len(rh.analyzer_stdout) > 0:
if not quiet_output_on_stdout:
LOG.debug_analyzer('\n' + rh.analyzer_stdout)
if capture_analysis_output:
with open(os.path.join(success_dir, result_base) +
".stdout.txt", 'w') as outf:
outf.write(rh.analyzer_stdout)
if len(rh.analyzer_stderr) > 0:
if not quiet_output_on_stdout:
LOG.debug_analyzer('\n' + rh.analyzer_stderr)
if capture_analysis_output:
with open(os.path.join(success_dir, result_base) +
".stderr.txt", 'w') as outf:
outf.write(rh.analyzer_stderr)
rh.postprocess_result()
# Generated reports will be handled separately at store.
# Save some extra information next to the plist, .source
# acting as an extra metadata file.
with open(result_file + ".source", 'w') as orig:
orig.write(
rh.analyzed_source_file.replace(r'\ ', ' ') + "\n")
if os.path.exists(rh.analyzer_result_file) and \
not os.path.exists(result_file):
os.rename(rh.analyzer_result_file, result_file)
LOG.info("[%d/%d] %s analyzed %s successfully." %
(progress_checked_num.value, progress_actions.value,
action.analyzer_type, source_file_name))
# Remove the previously generated error file.
if os.path.exists(failed_dir):
err_file = os.path.join(failed_dir, result_base + '.zip')
if os.path.exists(err_file):
os.remove(err_file)
if skip_handler:
# We need to check the plist content because skipping
# reports in headers can be done only this way.
plist_parser.skip_report_from_plist(result_file,
skip_handler)
else:
# If the analysis has failed, we help debugging.
if not os.path.exists(failed_dir):
os.makedirs(failed_dir)
LOG.debug("Writing error debugging to '" + failed_dir + "'")
zip_file = result_base + '.zip'
with zipfile.ZipFile(os.path.join(failed_dir, zip_file),
'w') as archive:
if len(rh.analyzer_stdout) > 0:
LOG.debug("[ZIP] Writing analyzer STDOUT to /stdout")
archive.writestr("stdout", rh.analyzer_stdout)
if not quiet_output_on_stdout:
LOG.debug_analyzer('\n' + rh.analyzer_stdout)
if len(rh.analyzer_stderr) > 0:
LOG.debug("[ZIP] Writing analyzer STDERR to /stderr")
archive.writestr("stderr", rh.analyzer_stderr)
if not quiet_output_on_stdout:
LOG.debug_analyzer('\n' + rh.analyzer_stderr)
LOG.debug("Generating dependent headers via compiler...")
try:
dependencies = set(create_dependencies(rh.buildaction))
except Exception as ex:
LOG.debug("Couldn't create dependencies:")
LOG.debug(str(ex))
archive.writestr("no-sources", str(ex))
dependencies = set()
LOG.debug("Fetching other dependent files from analyzer "
"output...")
try:
other_files = set()
if len(rh.analyzer_stdout) > 0:
other_files.update(
source_analyzer.get_analyzer_mentioned_files(
rh.analyzer_stdout))
if len(rh.analyzer_stderr) > 0:
other_files.update(
source_analyzer.get_analyzer_mentioned_files(
rh.analyzer_stderr))
except Exception as ex:
LOG.debug("Couldn't generate list of other files "
"from analyzer output:")
LOG.debug(str(ex))
other_files = set()
dependencies.update(other_files)
LOG.debug("Writing dependent files to archive.")
for dependent_source in dependencies:
dependent_source = os.path.join(action.directory,
dependent_source)
if not os.path.isabs(dependent_source):
dependent_source = \
os.path.abspath(dependent_source)
LOG.debug("[ZIP] Writing '" + dependent_source + "' "
"to the archive.")
archive_path = dependent_source.lstrip('/')
try:
archive.write(
dependent_source,
os.path.join("sources-root",
archive_path),
zipfile.ZIP_DEFLATED)
except Exception as ex:
# In certain cases, the output could contain
# invalid tokens (such as error messages that were
# printed even though the dependency generation
# returned 0).
LOG.debug("[ZIP] Couldn't write, because " +
str(ex))
archive.writestr(
os.path.join("failed-sources-root",
archive_path),
"Couldn't write this file, because:\n" +
str(ex))
LOG.debug("[ZIP] Writing extra information...")
archive.writestr("build-action",
rh.buildaction.original_command)
archive.writestr("analyzer-command",
' '.join(rh.analyzer_cmd))
archive.writestr("return-code",
str(rh.analyzer_returncode))
LOG.debug("ZIP file written at '" +
os.path.join(failed_dir, zip_file) + "'")
LOG.error("Analyzing '" + source_file_name + "' with " +
action.analyzer_type + " failed.")
if rh.analyzer_stdout != '' and not quiet_output_on_stdout:
LOG.error(rh.analyzer_stdout)
if rh.analyzer_stderr != '' and not quiet_output_on_stdout:
LOG.error(rh.analyzer_stderr)
return_codes = rh.analyzer_returncode
# Remove files that successfully analyzed earlier on.
plist_file = result_base + ".plist"
if os.path.exists(plist_file):
os.remove(plist_file)
progress_checked_num.value += 1
return return_codes, skipped, reanalyzed, action.analyzer_type, \
result_file
except Exception as e:
LOG.debug_analyzer(str(e))
traceback.print_exc(file=sys.stdout)
return 1, skipped, reanalyzed, action.analyzer_type, None
def start_workers(actions, context, analyzer_config_map,
jobs, output_path, skip_handler, metadata,
quiet_analyze, capture_analysis_output):
"""
Start the workers in the process pool.
For every build action there is worker which makes the analysis.
"""
# Handle SIGINT to stop this script running.
def signal_handler(*arg, **kwarg):
try:
pool.terminate()
finally:
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
# Start checking parallel.
checked_var = multiprocessing.Value('i', 1)
actions_num = multiprocessing.Value('i', len(actions))
pool = multiprocessing.Pool(jobs,
initializer=init_worker,
initargs=(checked_var,
actions_num))
try:
# Workaround, equivalent of map.
# The main script does not get signal
# while map or map_async function is running.
# It is a python bug, this does not happen if a timeout is specified;
# then receive the interrupt immediately.
analyzed_actions = [(build_action,
context,
analyzer_config_map,
output_path,
skip_handler,
quiet_analyze,
capture_analysis_output)
for build_action in actions]
pool.map_async(check,
analyzed_actions,
1,
callback=lambda results: worker_result_handler(
results, metadata, output_path)
).get(float('inf'))
pool.close()
except Exception:
pool.terminate()
raise
finally:
pool.join()
| 1 | 8,583 | Does it make sense to have this very thin wrapper that only calls `create_dependencies`? And maybe having `create_dependencies` return a set would be better than convert here? | Ericsson-codechecker | c |
@@ -401,7 +401,7 @@ int h2o_get_compressible_types(const h2o_headers_t *headers)
return compressible_types;
}
-h2o_iovec_t h2o_build_destination(h2o_req_t *req, const char *prefix, size_t prefix_len)
+h2o_iovec_t h2o_build_destination(h2o_req_t *req, const char *prefix, size_t prefix_len, int escape)
{
h2o_iovec_t parts[4];
size_t num_parts = 0; | 1 | /*
* Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Satoh Hiroh
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include "h2o.h"
#include "h2o/http1.h"
#include "h2o/http2.h"
struct st_h2o_accept_data_t {
h2o_accept_ctx_t *ctx;
h2o_socket_t *sock;
h2o_timeout_entry_t timeout;
h2o_memcached_req_t *async_resumption_get_req;
struct timeval connected_at;
};
static void on_accept_timeout(h2o_timeout_entry_t *entry);
static struct st_h2o_accept_data_t *create_accept_data(h2o_accept_ctx_t *ctx, h2o_socket_t *sock, struct timeval connected_at)
{
struct st_h2o_accept_data_t *data = h2o_mem_alloc(sizeof(*data));
data->ctx = ctx;
data->sock = sock;
data->timeout = (h2o_timeout_entry_t){};
data->timeout.cb = on_accept_timeout;
h2o_timeout_link(ctx->ctx->loop, &ctx->ctx->handshake_timeout, &data->timeout);
data->async_resumption_get_req = NULL;
data->connected_at = connected_at;
sock->data = data;
return data;
}
static void free_accept_data(struct st_h2o_accept_data_t *data)
{
assert(data->async_resumption_get_req == NULL);
h2o_timeout_unlink(&data->timeout);
free(data);
}
static struct {
h2o_memcached_context_t *memc;
unsigned expiration;
} async_resumption_context;
static void async_resumption_on_get(h2o_iovec_t session_data, void *_accept_data)
{
struct st_h2o_accept_data_t *accept_data = _accept_data;
accept_data->async_resumption_get_req = NULL;
h2o_socket_ssl_resume_server_handshake(accept_data->sock, session_data);
}
static void async_resumption_get(h2o_socket_t *sock, h2o_iovec_t session_id)
{
struct st_h2o_accept_data_t *data = sock->data;
data->async_resumption_get_req =
h2o_memcached_get(async_resumption_context.memc, data->ctx->libmemcached_receiver, session_id, async_resumption_on_get,
data, H2O_MEMCACHED_ENCODE_KEY | H2O_MEMCACHED_ENCODE_VALUE);
}
static void async_resumption_new(h2o_iovec_t session_id, h2o_iovec_t session_data)
{
h2o_memcached_set(async_resumption_context.memc, session_id, session_data,
(uint32_t)time(NULL) + async_resumption_context.expiration,
H2O_MEMCACHED_ENCODE_KEY | H2O_MEMCACHED_ENCODE_VALUE);
}
static void async_resumption_remove(h2o_iovec_t session_id)
{
h2o_memcached_delete(async_resumption_context.memc, session_id, H2O_MEMCACHED_ENCODE_KEY);
}
void h2o_accept_setup_async_ssl_resumption(h2o_memcached_context_t *memc, unsigned expiration)
{
async_resumption_context.memc = memc;
async_resumption_context.expiration = expiration;
h2o_socket_ssl_async_resumption_init(async_resumption_get, async_resumption_new, async_resumption_remove);
}
void on_accept_timeout(h2o_timeout_entry_t *entry)
{
/* TODO log */
struct st_h2o_accept_data_t *data = H2O_STRUCT_FROM_MEMBER(struct st_h2o_accept_data_t, timeout, entry);
if (data->async_resumption_get_req != NULL) {
h2o_memcached_cancel_get(async_resumption_context.memc, data->async_resumption_get_req);
data->async_resumption_get_req = NULL;
}
h2o_socket_t *sock = data->sock;
free_accept_data(data);
h2o_socket_close(sock);
}
static void on_ssl_handshake_complete(h2o_socket_t *sock, const char *err)
{
struct st_h2o_accept_data_t *data = sock->data;
sock->data = NULL;
if (err != NULL) {
h2o_socket_close(sock);
goto Exit;
}
h2o_iovec_t proto = h2o_socket_ssl_get_selected_protocol(sock);
const h2o_iovec_t *ident;
for (ident = h2o_http2_alpn_protocols; ident->len != 0; ++ident) {
if (proto.len == ident->len && memcmp(proto.base, ident->base, proto.len) == 0) {
/* connect as http2 */
h2o_http2_accept(data->ctx, sock, data->connected_at);
goto Exit;
}
}
/* connect as http1 */
h2o_http1_accept(data->ctx, sock, data->connected_at);
Exit:
free_accept_data(data);
}
static ssize_t parse_proxy_line(char *src, size_t len, struct sockaddr *sa, socklen_t *salen)
{
#define CHECK_EOF() \
if (p == end) \
return -2
#define EXPECT_CHAR(ch) \
do { \
CHECK_EOF(); \
if (*p++ != ch) \
return -1; \
} while (0)
#define SKIP_TO_WS() \
do { \
do { \
CHECK_EOF(); \
} while (*p++ != ' '); \
--p; \
} while (0)
char *p = src, *end = p + len;
void *addr;
in_port_t *port;
/* "PROXY "*/
EXPECT_CHAR('P');
EXPECT_CHAR('R');
EXPECT_CHAR('O');
EXPECT_CHAR('X');
EXPECT_CHAR('Y');
EXPECT_CHAR(' ');
/* "TCP[46] " */
CHECK_EOF();
if (*p++ != 'T') {
*salen = 0; /* indicate that no data has been obtained */
goto SkipToEOL;
}
EXPECT_CHAR('C');
EXPECT_CHAR('P');
CHECK_EOF();
switch (*p++) {
case '4':
*salen = sizeof(struct sockaddr_in);
*((struct sockaddr_in *)sa) = (struct sockaddr_in){};
sa->sa_family = AF_INET;
addr = &((struct sockaddr_in *)sa)->sin_addr;
port = &((struct sockaddr_in *)sa)->sin_port;
break;
case '6':
*salen = sizeof(struct sockaddr_in6);
*((struct sockaddr_in6 *)sa) = (struct sockaddr_in6){};
sa->sa_family = AF_INET6;
addr = &((struct sockaddr_in6 *)sa)->sin6_addr;
port = &((struct sockaddr_in6 *)sa)->sin6_port;
break;
default:
return -1;
}
EXPECT_CHAR(' ');
/* parse peer address */
char *addr_start = p;
SKIP_TO_WS();
*p = '\0';
if (inet_pton(sa->sa_family, addr_start, addr) != 1)
return -1;
*p++ = ' ';
/* skip local address */
SKIP_TO_WS();
++p;
/* parse peer port */
char *port_start = p;
SKIP_TO_WS();
*p = '\0';
unsigned short usval;
if (sscanf(port_start, "%hu", &usval) != 1)
return -1;
*port = htons(usval);
*p++ = ' ';
SkipToEOL:
do {
CHECK_EOF();
} while (*p++ != '\r');
CHECK_EOF();
if (*p++ != '\n')
return -2;
return p - src;
#undef CHECK_EOF
#undef EXPECT_CHAR
#undef SKIP_TO_WS
}
static void on_read_proxy_line(h2o_socket_t *sock, const char *err)
{
struct st_h2o_accept_data_t *data = sock->data;
if (err != NULL) {
free_accept_data(data);
h2o_socket_close(sock);
return;
}
struct sockaddr_storage addr;
socklen_t addrlen;
ssize_t r = parse_proxy_line(sock->input->bytes, sock->input->size, (void *)&addr, &addrlen);
switch (r) {
case -1: /* error, just pass the input to the next handler */
break;
case -2: /* incomplete */
return;
default:
h2o_buffer_consume(&sock->input, r);
if (addrlen != 0)
h2o_socket_setpeername(sock, (void *)&addr, addrlen);
break;
}
if (data->ctx->ssl_ctx != NULL) {
h2o_socket_ssl_handshake(sock, data->ctx->ssl_ctx, NULL, on_ssl_handshake_complete);
} else {
struct st_h2o_accept_data_t *data = sock->data;
sock->data = NULL;
h2o_http1_accept(data->ctx, sock, data->connected_at);
free_accept_data(data);
}
}
void h2o_accept(h2o_accept_ctx_t *ctx, h2o_socket_t *sock)
{
struct timeval connected_at = *h2o_get_timestamp(ctx->ctx, NULL, NULL);
if (ctx->expect_proxy_line || ctx->ssl_ctx != NULL) {
create_accept_data(ctx, sock, connected_at);
if (ctx->expect_proxy_line) {
h2o_socket_read_start(sock, on_read_proxy_line);
} else {
h2o_socket_ssl_handshake(sock, ctx->ssl_ctx, NULL, on_ssl_handshake_complete);
}
} else {
h2o_http1_accept(ctx, sock, connected_at);
}
}
size_t h2o_stringify_protocol_version(char *dst, int version)
{
char *p = dst;
if (version < 0x200) {
assert(version <= 0x109);
#define PREFIX "HTTP/1."
memcpy(p, PREFIX, sizeof(PREFIX) - 1);
p += sizeof(PREFIX) - 1;
#undef PREFIX
*p++ = '0' + (version & 0xff);
} else {
#define PROTO "HTTP/2"
memcpy(p, PROTO, sizeof(PROTO) - 1);
p += sizeof(PROTO) - 1;
#undef PROTO
}
*p = '\0';
return p - dst;
}
static void push_one_path(h2o_mem_pool_t *pool, h2o_iovec_vector_t *paths_to_push, h2o_iovec_t *url,
h2o_iovec_t base_path, const h2o_url_scheme_t *input_scheme, h2o_iovec_t input_authority,
const h2o_url_scheme_t *base_scheme, h2o_iovec_t *base_authority)
{
h2o_url_t parsed, resolved;
/* check the authority, and extract absolute path */
if (h2o_url_parse_relative(url->base, url->len, &parsed) != 0)
return;
/* fast-path for abspath form */
if (base_scheme == NULL && parsed.scheme == NULL && parsed.authority.base == NULL && url->len != 0 && url->base[0] == '/') {
h2o_vector_reserve(pool, paths_to_push, paths_to_push->size + 1);
paths_to_push->entries[paths_to_push->size++] = h2o_strdup(pool, url->base, url->len);
return;
}
/* check scheme and authority if given URL contains either of the two, or if base is specified */
h2o_url_t base = {input_scheme, input_authority, {}, base_path, 65535};
if (base_scheme != NULL) {
base.scheme = base_scheme;
base.authority = *base_authority;
}
h2o_url_resolve(pool, &base, &parsed, &resolved);
if (input_scheme != resolved.scheme)
return;
if (!h2o_lcstris(input_authority.base, input_authority.len, resolved.authority.base, resolved.authority.len))
return;
h2o_vector_reserve(pool, paths_to_push, paths_to_push->size + 1);
paths_to_push->entries[paths_to_push->size++] = resolved.path;
}
h2o_iovec_vector_t h2o_extract_push_path_from_link_header(h2o_mem_pool_t *pool, const char *value, size_t value_len, h2o_iovec_t base_path,
const h2o_url_scheme_t *input_scheme, h2o_iovec_t input_authority,
const h2o_url_scheme_t *base_scheme, h2o_iovec_t *base_authority)
{
h2o_iovec_vector_t paths_to_push = {};
h2o_iovec_t iter = h2o_iovec_init(value, value_len), token_value;
const char *token;
size_t token_len;
/* extract URL values from Link: </pushed.css>; rel=preload */
do {
if ((token = h2o_next_token(&iter, ';', &token_len, NULL)) == NULL)
break;
/* first element should be <URL> */
if (!(token_len >= 2 && token[0] == '<' && token[token_len - 1] == '>'))
break;
h2o_iovec_t url = h2o_iovec_init(token + 1, token_len - 2);
/* find rel=preload */
int preload = 0, nopush = 0;
while ((token = h2o_next_token(&iter, ';', &token_len, &token_value)) != NULL &&
!h2o_memis(token, token_len, H2O_STRLIT(","))) {
if (h2o_lcstris(token, token_len, H2O_STRLIT("rel")) &&
h2o_lcstris(token_value.base, token_value.len, H2O_STRLIT("preload"))) {
preload++;
} else if (h2o_lcstris(token, token_len, H2O_STRLIT("nopush"))) {
nopush++;
}
}
if (!nopush && preload)
push_one_path(pool, &paths_to_push, &url, base_path, input_scheme, input_authority, base_scheme, base_authority);
} while (token != NULL);
return paths_to_push;
}
int h2o_get_compressible_types(const h2o_headers_t *headers)
{
size_t header_index;
int compressible_types = 0;
for (header_index = 0; header_index != headers->size; ++header_index) {
const h2o_header_t *header = headers->entries + header_index;
if (H2O_UNLIKELY(header->name == &H2O_TOKEN_ACCEPT_ENCODING->buf)) {
h2o_iovec_t iter = h2o_iovec_init(header->value.base, header->value.len);
const char *token = NULL;
size_t token_len = 0;
while ((token = h2o_next_token(&iter, ',', &token_len, NULL)) != NULL) {
if (h2o_lcstris(token, token_len, H2O_STRLIT("gzip")))
compressible_types |= H2O_COMPRESSIBLE_GZIP;
else if (h2o_lcstris(token, token_len, H2O_STRLIT("br")))
compressible_types |= H2O_COMPRESSIBLE_BROTLI;
}
}
}
return compressible_types;
}
h2o_iovec_t h2o_build_destination(h2o_req_t *req, const char *prefix, size_t prefix_len)
{
h2o_iovec_t parts[4];
size_t num_parts = 0;
int conf_ends_with_slash = req->pathconf->path.base[req->pathconf->path.len - 1] == '/';
int prefix_ends_with_slash = prefix[prefix_len - 1] == '/';
/* destination starts with given prefix */
parts[num_parts++] = h2o_iovec_init(prefix, prefix_len);
/* make adjustments depending on the trailing slashes */
if (conf_ends_with_slash != prefix_ends_with_slash) {
if (conf_ends_with_slash) {
parts[num_parts++] = h2o_iovec_init(H2O_STRLIT("/"));
} else {
if (req->path_normalized.len != req->pathconf->path.len)
parts[num_parts - 1].len -= 1;
}
}
/* append suffix path and query */
parts[num_parts++] = h2o_uri_escape(
&req->pool, req->path_normalized.base + req->pathconf->path.len, req->path_normalized.len - req->pathconf->path.len, "/@:");
if (req->query_at != SIZE_MAX)
parts[num_parts++] = h2o_iovec_init(req->path.base + req->query_at, req->path.len - req->query_at);
return h2o_concat_list(&req->pool, parts, num_parts);
}
/* h2-14 and h2-16 are kept for backwards compatibility, as they are often used */
#define ALPN_ENTRY(s) \
{ \
H2O_STRLIT(s) \
}
#define ALPN_PROTOCOLS_CORE ALPN_ENTRY("h2"), ALPN_ENTRY("h2-16"), ALPN_ENTRY("h2-14")
#define NPN_PROTOCOLS_CORE \
"\x02" \
"h2" \
"\x05" \
"h2-16" \
"\x05" \
"h2-14"
static const h2o_iovec_t http2_alpn_protocols[] = {ALPN_PROTOCOLS_CORE, {}};
const h2o_iovec_t *h2o_http2_alpn_protocols = http2_alpn_protocols;
static const h2o_iovec_t alpn_protocols[] = {ALPN_PROTOCOLS_CORE, {H2O_STRLIT("http/1.1")}, {}};
const h2o_iovec_t *h2o_alpn_protocols = alpn_protocols;
const char *h2o_http2_npn_protocols = NPN_PROTOCOLS_CORE;
const char *h2o_npn_protocols = NPN_PROTOCOLS_CORE "\x08"
"http/1.1";
uint64_t h2o_connection_id = 0;
| 1 | 11,243 | How about renaming `escape` to `use_path_normalized` or something to better indicate that it is a selection between `path` and `path_normalized`? | h2o-h2o | c |
@@ -41,4 +41,8 @@ public interface Accountable {
return Collections.emptyList();
}
+ /**
+ * An accountable that always returns 0
+ */
+ Accountable NULL_ACCOUNTABLE = () -> 0;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.util;
import java.util.Collection;
import java.util.Collections;
/**
* An object whose RAM usage can be computed.
*
* @lucene.internal
*/
public interface Accountable {
/**
* Return the memory usage of this object in bytes. Negative values are illegal.
*/
long ramBytesUsed();
/**
* Returns nested resources of this class.
* The result should be a point-in-time snapshot (to avoid race conditions).
* @see Accountables
*/
default Collection<Accountable> getChildResources() {
return Collections.emptyList();
}
}
| 1 | 36,969 | Interface members are always `static final` right? | apache-lucene-solr | java |
@@ -0,0 +1,17 @@
+// +build nintendoswitch
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+// Result svcOutputDebugString(const char *str, u64 size)
+//go:export svcOutputDebugString
+func svcOutputDebugString(str *byte, size uint64) uint64
+
+//go:export malloc
+func extalloc(size uintptr) unsafe.Pointer
+
+//export free
+func extfree(ptr unsafe.Pointer) | 1 | 1 | 9,684 | It seems to me that this could be in the runtime_nintendoswitch.go file, or is there a reason to keep it separate? | tinygo-org-tinygo | go |
|
@@ -1857,8 +1857,6 @@ unsigned SwiftExpressionParser::Parse(DiagnosticManager &diagnostic_manager,
log->PutCString(s.c_str());
}
- swift::performSILLinking(sil_module.get());
-
if (m_swift_ast_context->HasErrors()) {
DiagnoseSwiftASTContextError();
return 1; | 1 | //===-- SwiftExpressionParser.cpp -------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "SwiftExpressionParser.h"
#include "SwiftASTManipulator.h"
#include "SwiftREPLMaterializer.h"
#include "SwiftSILManipulator.h"
#include "SwiftUserExpression.h"
#include "Plugins/ExpressionParser/Swift/SwiftDiagnostic.h"
#include "Plugins/ExpressionParser/Swift/SwiftExpressionVariable.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/ModuleList.h"
#include "lldb/Core/ModuleSpec.h"
#include "lldb/Core/ValueObject.h"
#include "lldb/Expression/DiagnosticManager.h"
#include "lldb/Expression/Expression.h"
#include "lldb/Expression/ExpressionSourceCode.h"
#include "lldb/Expression/IRExecutionUnit.h"
#include "lldb/Symbol/CompileUnit.h"
#include "lldb/Symbol/SymbolFile.h"
#include "lldb/Symbol/SymbolVendor.h"
#include "lldb/Symbol/Type.h"
#include "lldb/Symbol/VariableList.h"
#include "lldb/Target/ExecutionContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/SwiftLanguageRuntime.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/Stream.h"
#include "llvm-c/Analysis.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
#include "clang/Basic/Module.h"
#include "clang/Rewrite/Core/RewriteBuffer.h"
#include "swift/AST/ASTContext.h"
#include "swift/AST/DiagnosticEngine.h"
#include "swift/AST/DiagnosticConsumer.h"
#include "swift/AST/IRGenOptions.h"
#include "swift/AST/Module.h"
#include "swift/AST/ModuleLoader.h"
#include "swift/Demangling/Demangle.h"
#include "swift/Basic/PrimarySpecificPaths.h"
#include "swift/Basic/SourceManager.h"
#include "swift/ClangImporter/ClangImporter.h"
#include "swift/Frontend/Frontend.h"
#include "swift/Parse/LocalContext.h"
#include "swift/Parse/PersistentParserState.h"
#include "swift/SIL/SILDebuggerClient.h"
#include "swift/SIL/SILFunction.h"
#include "swift/SIL/SILModule.h"
#include "swift/SILOptimizer/PassManager/Passes.h"
#include "swift/Serialization/SerializedModuleLoader.h"
#include "swift/Subsystems.h"
using namespace lldb_private;
using llvm::make_error;
using llvm::StringError;
using llvm::inconvertibleErrorCode;
SwiftExpressionParser::SwiftExpressionParser(
    ExecutionContextScope *exe_scope, Expression &expr,
    const EvaluateExpressionOptions &options)
    : ExpressionParser(exe_scope, expr, options.GetGenerateDebugInfo()),
      m_expr(expr), m_triple(), m_llvm_context(), m_module(),
      m_execution_unit_sp(), m_swift_ast_context(NULL), m_sc(),
      m_exe_scope(exe_scope), m_stack_frame_wp(), m_options(options) {
  // This parser only handles Swift expressions.
  assert(expr.Language() == lldb::eLanguageTypeSwift);
  // TODO This code is copied from ClangExpressionParser.cpp.
  // Factor this out into common code.
  lldb::TargetSP target_sp;
  if (exe_scope) {
    target_sp = exe_scope->CalculateTarget();
    lldb::StackFrameSP stack_frame = exe_scope->CalculateStackFrame();
    // Prefer the frame's full symbol context; without a frame fall back to
    // recording just the target.
    if (stack_frame) {
      m_stack_frame_wp = stack_frame;
      m_sc = stack_frame->GetSymbolContext(lldb::eSymbolContextEverything);
    } else {
      m_sc.target_sp = target_sp;
    }
  }
  if (target_sp && target_sp->GetArchitecture().IsValid()) {
    std::string triple = target_sp->GetArchitecture().GetTriple().str();
    // Truncate the target triple just before its third '-', dropping the
    // trailing component (everything from the third dash on).
    int dash_count = 0;
    for (size_t i = 0; i < triple.size(); ++i) {
      if (triple[i] == '-')
        dash_count++;
      if (dash_count == 3) {
        triple.resize(i);
        break;
      }
    }
    m_triple = triple;
  } else {
    // No valid target architecture: default to the host triple.
    m_triple = llvm::sys::getDefaultTargetTriple();
  }
  if (target_sp) {
    // Parse into the target's scratch Swift AST context (created on demand).
    Status error;
    m_swift_ast_context = llvm::cast_or_null<SwiftASTContext>(
        target_sp->GetScratchSwiftASTContext(error, *exe_scope, true));
  }
}
/// Write a human-readable description of \p file_unit to \p s.
///
/// Used when logging which file units make up an imported module.
/// Note: the original version was missing `break` statements in both
/// switches, so e.g. a Library source file printed "LibraryMainREPLSIL"
/// and the "<unknown>" default fell through into the Source case.
static void DescribeFileUnit(Stream &s, swift::FileUnit *file_unit) {
  s.PutCString("kind = ");
  switch (file_unit->getKind()) {
  default: {
    s.PutCString("<unknown>");
  } break;
  case swift::FileUnitKind::Source: {
    s.PutCString("Source, ");
    if (swift::SourceFile *source_file =
            llvm::dyn_cast<swift::SourceFile>(file_unit)) {
      s.Printf("filename = '%s', ", source_file->getFilename().str().c_str());
      s.PutCString("source file kind = ");
      switch (source_file->Kind) {
      case swift::SourceFileKind::Library:
        s.PutCString("Library");
        break;
      case swift::SourceFileKind::Main:
        s.PutCString("Main");
        break;
      case swift::SourceFileKind::REPL:
        s.PutCString("REPL");
        break;
      case swift::SourceFileKind::SIL:
        s.PutCString("SIL");
        break;
      }
    }
  } break;
  case swift::FileUnitKind::Builtin: {
    s.PutCString("Builtin");
  } break;
  case swift::FileUnitKind::SerializedAST:
  case swift::FileUnitKind::ClangModule: {
    s.PutCString("SerializedAST, ");
    swift::LoadedFile *loaded_file = llvm::cast<swift::LoadedFile>(file_unit);
    s.Printf("filename = '%s'", loaded_file->getFilename().str().c_str());
  } break;
  };
}
/// Compute the fully qualified name of \p module into \p result.
///
/// The result is the module's own name, prefixed (dot-separated) with the
/// names of any enclosing Clang modules. \p result is cleared first and
/// left empty when the name cannot be determined.
static void GetNameFromModule(swift::ModuleDecl *module, std::string &result) {
  result.clear();
  if (!module)
    return;
  const char *module_name = module->getName().get();
  if (!module_name)
    return;
  result.append(module_name);
  // At present, there doesn't seem to be any way to get the full module path
  // from the Swift side, so walk the underlying Clang module's parent chain
  // (when there is one) and prepend each enclosing module's name.
  const clang::Module *clang_module = module->findUnderlyingClangModule();
  if (!clang_module)
    return;
  for (const clang::Module *parent = clang_module->Parent; parent;
       parent = parent->Parent) {
    if (parent->Name.empty())
      continue;
    result.insert(0, 1, '.');
    result.insert(0, parent->Name);
  }
}
/// Largely lifted from swift::performAutoImport, but serves our own nefarious
/// purposes.
///
/// Populates \p source_file's imports. When \p user_imports is false, this
/// loads the implicit imports: the Swift standard library plus the modules
/// recorded in the current compile unit. When true, it loads the modules the
/// user's expression itself imported, registers them as hand-loaded, and then
/// re-loads every previously hand-loaded module from the persistent state.
/// Returns true on success; on failure \p error describes the problem.
static bool PerformAutoImport(SwiftASTContext &swift_ast_context,
                              SymbolContext &sc, ExecutionContextScope &exe_scope,
                              lldb::StackFrameWP &stack_frame_wp,
                              swift::SourceFile &source_file, bool user_imports,
                              Status &error) {
  Log *log(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
  const std::vector<ConstString> *cu_modules = nullptr;
  CompileUnit *compile_unit = sc.comp_unit;
  if (compile_unit)
    cu_modules = &compile_unit->GetImportedModules();
  llvm::SmallVector<swift::ModuleDecl::ImportedModule, 2> imported_modules;
  llvm::SmallVector<std::pair<swift::ModuleDecl::ImportedModule,
                              swift::SourceFile::ImportOptions>,
                    2>
      additional_imports;
  source_file.getImportedModules(imported_modules,
                                 swift::ModuleDecl::ImportFilter::All);
  // Names already handled; prevents importing the same module twice.
  std::set<ConstString> loaded_modules;
  // Load one module by name, appending it to additional_imports and
  // imported_modules. Returns true if the module was loaded (or had been
  // loaded already).
  auto load_one_module = [&](const ConstString &module_name) {
    error.Clear();
    if (loaded_modules.count(module_name))
      return true;
    if (log)
      log->Printf("[PerformAutoImport] Importing module %s",
                  module_name.AsCString());
    loaded_modules.insert(module_name);
    swift::ModuleDecl *swift_module = nullptr;
    lldb::StackFrameSP this_frame_sp(stack_frame_wp.lock());
    // The bridging-header module is owned by the ClangImporter; anything else
    // is searched for, preferring the process-aware lookup when a frame is
    // available.
    if (module_name == ConstString(swift_ast_context.GetClangImporter()
                                       ->getImportedHeaderModule()
                                       ->getName()
                                       .str()))
      swift_module =
          swift_ast_context.GetClangImporter()->getImportedHeaderModule();
    else if (this_frame_sp) {
      lldb::ProcessSP process_sp(this_frame_sp->CalculateProcess());
      if (process_sp)
        swift_module = swift_ast_context.FindAndLoadModule(
            module_name, *process_sp.get(), error);
    } else
      swift_module = swift_ast_context.GetModule(module_name, error);
    if (!swift_module || !error.Success() ||
        swift_ast_context.HasFatalErrors()) {
      if (log)
        log->Printf("[PerformAutoImport] Couldn't import module %s: %s",
                    module_name.AsCString(), error.AsCString());
      // A non-fatal error is tolerated as long as we got a usable module;
      // otherwise the import fails.
      if (!swift_module || swift_ast_context.HasFatalErrors()) {
        return false;
      }
    }
    if (log) {
      log->Printf("Importing %s with source files:", module_name.AsCString());
      for (swift::FileUnit *file_unit : swift_module->getFiles()) {
        StreamString ss;
        DescribeFileUnit(ss, file_unit);
        log->Printf(" %s", ss.GetData());
      }
    }
    additional_imports.push_back(std::make_pair(
        std::make_pair(swift::ModuleDecl::AccessPathTy(), swift_module),
        swift::SourceFile::ImportOptions()));
    imported_modules.push_back(
        std::make_pair(swift::ModuleDecl::AccessPathTy(), swift_module));
    return true;
  };
  if (!user_imports) {
    // Implicit imports: the standard library plus the compile unit's modules.
    if (!load_one_module(ConstString("Swift")))
      return false;
    if (cu_modules) {
      for (const ConstString &module_name : *cu_modules) {
        if (!load_one_module(module_name))
          return false;
      }
    }
  } else {
    // Imports written by the user in the expression text.
    llvm::SmallVector<swift::ModuleDecl::ImportedModule, 2> parsed_imports;
    source_file.getImportedModules(parsed_imports,
                                   swift::ModuleDecl::ImportFilter::All);
    auto *persistent_expression_state =
        sc.target_sp->GetSwiftPersistentExpressionState(exe_scope);
    for (auto module_pair : parsed_imports) {
      swift::ModuleDecl *module = module_pair.second;
      if (module) {
        std::string module_name;
        GetNameFromModule(module, module_name);
        if (!module_name.empty()) {
          ConstString module_const_str(module_name);
          if (log)
            log->Printf("[PerformAutoImport] Performing auto import on found "
                        "module: %s.\n",
                        module_name.c_str());
          if (!load_one_module(module_const_str))
            return false;
          // How do we tell we are in REPL or playground mode?
          persistent_expression_state->AddHandLoadedModule(module_const_str);
        }
      }
    }
    // Finally get the hand-loaded modules from the
    // SwiftPersistentExpressionState and load them into this context:
    if (!persistent_expression_state->RunOverHandLoadedModules(load_one_module))
      return false;
  }
  source_file.addImports(additional_imports);
  return true;
}
// Metadata attached to an expression variable that is backed by an LLDB
// persistent expression variable (a '$'-name kept across expressions).
class VariableMetadataPersistent
    : public SwiftASTManipulatorBase::VariableMetadata {
public:
  VariableMetadataPersistent(lldb::ExpressionVariableSP &persistent_variable_sp)
      : m_persistent_variable_sp(persistent_variable_sp) {}
  // Four-character type code identifying this metadata subclass.
  static constexpr unsigned Type() { return 'Pers'; }
  virtual unsigned GetType() { return Type(); }
  lldb::ExpressionVariableSP m_persistent_variable_sp;
};
// Metadata attached to an expression variable that is backed by a variable
// in the debuggee's symbol information (frame local, argument, or global).
class VariableMetadataVariable
    : public SwiftASTManipulatorBase::VariableMetadata {
public:
  VariableMetadataVariable(lldb::VariableSP &variable_sp)
      : m_variable_sp(variable_sp) {}
  // Four-character type code identifying this metadata subclass.
  static constexpr unsigned Type() { return 'Vari'; }
  virtual unsigned GetType() { return Type(); }
  lldb::VariableSP m_variable_sp;
};
/// Bring \p source_type into \p target_context.
///
/// Returns an invalid CompilerType when \p source_type does not come from a
/// SwiftASTContext, and \p source_type unchanged when it already lives in
/// \p target_context.
static CompilerType ImportType(SwiftASTContext &target_context,
                               CompilerType source_type) {
  auto *source_ast_ctx =
      llvm::dyn_cast_or_null<SwiftASTContext>(source_type.GetTypeSystem());
  if (!source_ast_ctx)
    return CompilerType();
  if (source_ast_ctx == &target_context)
    return source_type;
  // Fast path: reconstruct the type in the target context from its mangled
  // name, which saves the mangling step a full ImportType would perform.
  CompilerType result;
  Status mangled_error;
  ConstString mangled_counterpart;
  if (source_type.GetTypeName().GetMangledCounterpart(mangled_counterpart))
    result = target_context.GetTypeFromMangledTypename(
        mangled_counterpart.GetCString(), mangled_error);
  // Slow path: ask the target context to import the type wholesale.
  if (!result.IsValid()) {
    Status import_error;
    result = target_context.ImportType(source_type, import_error);
  }
  return result;
}
namespace {
/// SILDebuggerClient that hooks LLDB into the Swift compiler: it decides
/// which expression-level declarations get hoisted to global scope, answers
/// name lookups out of LLDB's persistent expression state, and materializes
/// l-values for the variables LLDB has injected into the expression.
class LLDBNameLookup : public swift::SILDebuggerClient {
public:
  LLDBNameLookup(SwiftExpressionParser &parser, swift::SourceFile &source_file,
                 SwiftExpressionParser::SILVariableMap &variable_map,
                 SymbolContext &sc, ExecutionContextScope &exe_scope)
      : SILDebuggerClient(source_file.getASTContext()), m_parser(parser),
        m_log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_EXPRESSIONS)),
        m_source_file(source_file), m_variable_map(variable_map), m_sc(sc) {
    // Register ourselves as the module's debug client so the compiler
    // consults us during parsing and SILGen.
    source_file.getParentModule()->setDebugClient(this);
    if (!m_sc.target_sp)
      return;
    m_persistent_vars =
        m_sc.target_sp->GetSwiftPersistentExpressionState(exe_scope);
  }
  virtual ~LLDBNameLookup() {}
  /// Decide whether a declaration parsed in the expression should be hoisted
  /// to the source-file (global) level.
  virtual bool shouldGlobalize(swift::Identifier Name, swift::DeclKind Kind) {
    // In the REPL every declaration is globalized.
    if (m_parser.GetOptions().GetREPLEnabled())
      return true;
    else {
      // Extensions have to be globalized, there's no way to mark them as local
      // to the function, since their
      // name is the name of the thing being extended...
      if (Kind == swift::DeclKind::Extension)
        return true;
      // Operators need to be parsed at the global scope regardless of name.
      if (Kind == swift::DeclKind::Func && Name.isOperator())
        return true;
      // '$'-prefixed names are LLDB's debugger variables.
      const char *name_cstr = Name.get();
      if (name_cstr && name_cstr[0] == '$') {
        if (m_log)
          m_log->Printf("[LLDBNameLookup::shouldGlobalize] Returning true to "
                        "globalizing %s",
                        name_cstr);
        return true;
      }
    }
    return false;
  }
  /// Called by the compiler after it globalizes a declaration; stages the
  /// decl so it can later be promoted to the persistent state.
  virtual void didGlobalize(swift::Decl *decl) {
    swift::ValueDecl *value_decl = swift::dyn_cast<swift::ValueDecl>(decl);
    if (value_decl) {
      // It seems weird to be asking this again, but some DeclKinds must be
      // moved to
      // the source-file level to be legal. But we don't want to register them
      // with
      // lldb unless they are of the kind lldb explicitly wants to globalize.
      if (shouldGlobalize(value_decl->getBaseName().getIdentifier(),
                          value_decl->getKind()))
        m_staged_decls.AddDecl(value_decl, false, ConstString());
    }
  }
  /// LLDB never overrides program symbols; this only logs the query.
  virtual bool lookupOverrides(swift::DeclBaseName Name, swift::DeclContext *DC,
                               swift::SourceLoc Loc, bool IsTypeLookup,
                               ResultVector &RV) {
    // Counter is only used to correlate log lines from a single lookup.
    static unsigned counter = 0;
    unsigned count = counter++;
    if (m_log) {
      m_log->Printf("[LLDBNameLookup::lookupOverrides(%u)] Searching for %s",
                    count, Name.getIdentifier().get());
    }
    return false;
  }
  /// Supply additional lookup results from this expression's staged decls
  /// and from the persistent expression state. Returns true if anything was
  /// added to \p RV.
  virtual bool lookupAdditions(swift::DeclBaseName Name, swift::DeclContext *DC,
                               swift::SourceLoc Loc, bool IsTypeLookup,
                               ResultVector &RV) {
    // Counter is only used to correlate log lines from a single lookup.
    static unsigned counter = 0;
    unsigned count = counter++;
    StringRef NameStr = Name.getIdentifier().str();
    if (m_log) {
      m_log->Printf("[LLDBNameLookup::lookupAdditions (%u)] Searching for %s",
                    count, Name.getIdentifier().str().str().c_str());
    }
    ConstString name_const_str(NameStr);
    std::vector<swift::ValueDecl *> results;
    // First look up the matching Decl's we've made in this compile, then pass
    // that list to the
    // persistent decls, which will only add decls it has that are NOT
    // equivalent to the decls
    // we made locally.
    m_staged_decls.FindMatchingDecls(name_const_str, results);
    // Next look up persistent decls matching this name. Then, if we are in the
    // plain expression parser, and we
    // aren't looking at a debugger variable, filter out persistent results of
    // the same kind as one found by the
    // ordinary lookup mechanism in the parser . The problem
    // we are addressing here is the case where the user has entered the REPL
    // while in an ordinary debugging session
    // to play around. While there, e.g., they define a class that happens to
    // have the same name as one in the
    // program, then in some other context "expr" will call the class they've
    // defined, not the one in the program
    // itself would use. Plain "expr" should behave as much like code in the
    // program would, so we want to favor
    // entities of the same DeclKind & name from the program over ones defined
    // in the REPL. For function decls we
    // check the interface type and full name so we don't remove overloads that
    // don't exist in the current scope.
    //
    // Note also, we only do this for the persistent decls. Anything in the
    // "staged" list has been defined in this
    // expr setting and so is more local than local.
    if (m_persistent_vars) {
      bool skip_results_with_matching_kind =
          !(m_parser.GetOptions().GetREPLEnabled() ||
            m_parser.GetOptions().GetPlaygroundTransformEnabled() ||
            (!NameStr.empty() && NameStr.front() == '$'));
      size_t num_external_results = RV.size();
      if (skip_results_with_matching_kind && num_external_results > 0) {
        std::vector<swift::ValueDecl *> persistent_results;
        m_persistent_vars->GetSwiftPersistentDecls(name_const_str,
                                                   persistent_results);
        size_t num_persistent_results = persistent_results.size();
        for (size_t idx = 0; idx < num_persistent_results; idx++) {
          swift::ValueDecl *value_decl = persistent_results[idx];
          if (!value_decl)
            continue;
          swift::DeclName value_decl_name = value_decl->getFullName();
          swift::DeclKind value_decl_kind = value_decl->getKind();
          swift::CanType value_interface_type =
              value_decl->getInterfaceType()->getCanonicalType();
          bool is_function =
              swift::isa<swift::AbstractFunctionDecl>(value_decl);
          bool skip_it = false;
          for (size_t rv_idx = 0; rv_idx < num_external_results; rv_idx++) {
            if (swift::ValueDecl *rv_decl = RV[rv_idx].getValueDecl()) {
              if (value_decl_kind == rv_decl->getKind()) {
                // Functions are only shadowed by an exact overload: same
                // full name AND same interface type.
                if (is_function) {
                  swift::DeclName rv_full_name = rv_decl->getFullName();
                  if (rv_full_name.matchesRef(value_decl_name)) {
                    // If the full names match, make sure the interface types
                    // match:
                    if (rv_decl->getInterfaceType()->getCanonicalType() ==
                        value_interface_type)
                      skip_it = true;
                  }
                } else {
                  skip_it = true;
                }
                if (skip_it)
                  break;
              }
            }
          }
          if (!skip_it)
            results.push_back(value_decl);
        }
      } else {
        m_persistent_vars->GetSwiftPersistentDecls(name_const_str, results);
      }
    }
    for (size_t idx = 0; idx < results.size(); idx++) {
      swift::ValueDecl *value_decl = results[idx];
      assert(&DC->getASTContext() ==
             &value_decl->getASTContext()); // no import required
      RV.push_back(swift::LookupResultEntry(value_decl));
    }
    return results.size() > 0;
  }
  /// Emit a SIL l-value for an LLDB-injected variable, using the
  /// materialization info recorded in the variable map. Returns an empty
  /// SILValue for variables LLDB does not know about.
  virtual swift::SILValue emitLValueForVariable(swift::VarDecl *var,
                                                swift::SILBuilder &builder) {
    SwiftSILManipulator manipulator(builder);
    swift::Identifier variable_name = var->getName();
    ConstString variable_const_string(variable_name.get());
    SwiftExpressionParser::SILVariableMap::iterator vi =
        m_variable_map.find(variable_const_string.AsCString());
    if (vi == m_variable_map.end())
      return swift::SILValue();
    return manipulator.emitLValueForVariable(var, vi->second);
  }
  SwiftPersistentExpressionState::SwiftDeclMap &GetStagedDecls() {
    return m_staged_decls;
  }
  /// Return the -private-discriminator recorded for the current compile
  /// unit, if the symbol vendor knows one, so private decls of that unit
  /// are visible to the expression.
  virtual swift::Identifier getPreferredPrivateDiscriminator() {
    if (m_sc.comp_unit) {
      if (lldb_private::Module *module = m_sc.module_sp.get()) {
        if (lldb_private::SymbolVendor *symbol_vendor =
                module->GetSymbolVendor()) {
          std::string private_discriminator_string;
          if (symbol_vendor->GetCompileOption("-private-discriminator",
                                              private_discriminator_string,
                                              m_sc.comp_unit)) {
            return m_source_file.getASTContext().getIdentifier(
                private_discriminator_string);
          }
        }
      }
    }
    return swift::Identifier();
  }
private:
  SwiftExpressionParser &m_parser;
  Log *m_log;
  swift::SourceFile &m_source_file;
  SwiftExpressionParser::SILVariableMap &m_variable_map;
  SymbolContext m_sc;
  SwiftPersistentExpressionState *m_persistent_vars = nullptr;
  SwiftPersistentExpressionState::SwiftDeclMap
      m_staged_decls; // We stage the decls we are globalize in this map.
                      // They will get copied over to the SwiftPersistentVariable
                      // store if the parse succeeds.
};
} // END Anonymous namespace
/// Emit the typealiases the expression prologue relies on:
/// "$__lldb_context" (the type of 'self' at the current frame, after
/// resolving references, metatypes, generics, and weak storage) and one
/// "$__lldb_typeof_generic_<name>" alias per generic binding of the current
/// function.
static void
AddRequiredAliases(Block *block, lldb::StackFrameSP &stack_frame_sp,
                   SwiftASTContext &swift_ast_context,
                   SwiftASTManipulator &manipulator,
                   const Expression::SwiftGenericInfo &generic_info) {
  // First, emit the typealias for "$__lldb_context"
  // (do/while(0) so failures can bail out with 'break').
  do {
    if (!block)
      break;
    Function *function = block->CalculateSymbolContextFunction();
    if (!function)
      break;
    constexpr bool can_create = true;
    Block &function_block(function->GetBlock(can_create));
    lldb::VariableListSP variable_list_sp(
        function_block.GetBlockVariableList(true));
    if (!variable_list_sp)
      break;
    lldb::VariableSP self_var_sp(
        variable_list_sp->FindVariable(ConstString("self")));
    if (!self_var_sp)
      break;
    // Prefer the dynamic type seen at the frame; fall back to the static
    // type from the symbol information.
    CompilerType self_type;
    if (stack_frame_sp) {
      lldb::ValueObjectSP valobj_sp =
          stack_frame_sp->GetValueObjectForFrameVariable(
              self_var_sp, lldb::eNoDynamicValues);
      if (valobj_sp)
        self_type = valobj_sp->GetCompilerType();
    }
    if (!self_type.IsValid()) {
      if (Type *type = self_var_sp->GetType()) {
        self_type = type->GetForwardCompilerType();
      }
    }
    if (!self_type.IsValid() ||
        !llvm::isa<SwiftASTContext>(self_type.GetTypeSystem()))
      break;
    // Import before getting the unbound version, because the unbound version
    // may not be in the mangled name map
    CompilerType imported_self_type = ImportType(swift_ast_context, self_type);
    if (!imported_self_type.IsValid())
      break;
    // This might be a referenced type, in which case we really want to extend
    // the referent:
    imported_self_type =
        llvm::cast<SwiftASTContext>(imported_self_type.GetTypeSystem())
            ->GetReferentType(imported_self_type);
    // If we are extending a generic class it's going to be a metatype, and we
    // have to grab the instance type:
    imported_self_type =
        llvm::cast<SwiftASTContext>(imported_self_type.GetTypeSystem())
            ->GetInstanceType(imported_self_type.GetOpaqueQualType());
    Flags imported_self_type_flags(imported_self_type.GetTypeInfo());
    // If 'self' is the Self archetype, resolve it to the actual metatype it is
    if (SwiftASTContext::IsSelfArchetypeType(imported_self_type)) {
      SwiftLanguageRuntime *swift_runtime =
          stack_frame_sp->GetThread()->GetProcess()->GetSwiftLanguageRuntime();
      if (CompilerType concrete_self_type = swift_runtime->GetConcreteType(
              stack_frame_sp.get(), ConstString("Self"))) {
        if (SwiftASTContext *concrete_self_type_ast_ctx =
                llvm::dyn_cast_or_null<SwiftASTContext>(
                    concrete_self_type.GetTypeSystem())) {
          imported_self_type = concrete_self_type_ast_ctx->CreateMetatypeType(
              concrete_self_type);
          imported_self_type_flags.Reset(imported_self_type.GetTypeInfo());
          imported_self_type =
              ImportType(swift_ast_context, imported_self_type);
          if (imported_self_type_flags.AllSet(lldb::eTypeIsSwift |
                                              lldb::eTypeIsMetatype)) {
            imported_self_type = imported_self_type.GetInstanceType();
          }
        }
      }
    }
    // Get the instance type:
    if (imported_self_type_flags.AllSet(lldb::eTypeIsSwift |
                                        lldb::eTypeIsMetatype)) {
      imported_self_type = imported_self_type.GetInstanceType();
      imported_self_type_flags.Reset(imported_self_type.GetTypeInfo());
    }
    // Strip l-value/inout wrappers so the alias names the underlying type.
    swift::Type object_type =
        swift::Type((swift::TypeBase *)(imported_self_type.GetOpaqueQualType()))
            ->getWithoutSpecifierType();
    if (object_type.getPointer() &&
        (object_type.getPointer() != imported_self_type.GetOpaqueQualType()))
      imported_self_type = CompilerType(imported_self_type.GetTypeSystem(),
                                        object_type.getPointer());
    // If the type of 'self' is a bound generic type, get the unbound version
    bool is_generic = imported_self_type_flags.AllSet(lldb::eTypeIsSwift |
                                                      lldb::eTypeIsGeneric);
    bool is_bound = imported_self_type_flags.AllSet(lldb::eTypeIsSwift |
                                                    lldb::eTypeIsBound);
    if (is_generic) {
      if (is_bound)
        imported_self_type = imported_self_type.GetUnboundType();
    }
    // if 'self' is a weak storage type, it must be an optional. Look through
    // it and unpack the argument of "optional".
    if (swift::WeakStorageType *weak_storage_type =
            ((swift::TypeBase *)imported_self_type.GetOpaqueQualType())
                ->getAs<swift::WeakStorageType>()) {
      swift::Type referent_type = weak_storage_type->getReferentType();
      swift::BoundGenericEnumType *optional_type =
          referent_type->getAs<swift::BoundGenericEnumType>();
      if (!optional_type) {
        break;
      }
      swift::Type first_arg_type = optional_type->getGenericArgs()[0];
      swift::ClassType *self_class_type =
          first_arg_type->getAs<swift::ClassType>();
      if (!self_class_type) {
        break;
      }
      imported_self_type =
          CompilerType(imported_self_type.GetTypeSystem(), self_class_type);
    }
    imported_self_type_flags.Reset(imported_self_type.GetTypeInfo());
    if (imported_self_type_flags.AllClear(lldb::eTypeIsArchetype)) {
      swift::ValueDecl *type_alias_decl = nullptr;
      type_alias_decl = manipulator.MakeGlobalTypealias(
          swift_ast_context.GetASTContext()->getIdentifier("$__lldb_context"),
          imported_self_type);
      if (!type_alias_decl) {
        Log *log(
            lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
        if (log)
          log->Printf("SEP:AddRequiredAliases: Failed to make the "
                      "$__lldb_context typealias.");
      }
    } else {
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
      if (log)
        log->Printf("SEP:AddRequiredAliases: Failed to resolve the self "
                    "archetype - could not make the $__lldb_context "
                    "typealias.");
    }
  } while (0);
  // Emit the typedefs
  for (const Expression::SwiftGenericInfo::Binding &binding :
       generic_info.function_bindings) {
    CompilerType bound_type = binding.type;
    if (!llvm::isa<SwiftASTContext>(bound_type.GetTypeSystem()))
      continue;
    CompilerType imported_bound_type =
        ImportType(swift_ast_context, bound_type);
    if (!imported_bound_type.IsValid())
      continue;
    std::string alias_name("$__lldb_typeof_generic_");
    alias_name.append(binding.name);
    swift::ValueDecl *type_alias_decl = manipulator.MakeGlobalTypealias(
        swift_ast_context.GetASTContext()->getIdentifier(alias_name),
        imported_bound_type);
    if (!type_alias_decl)
      continue;
  }
}
/// Collect the local variables (and, in a final pass, the module globals)
/// visible at the current symbol context and append a VariableInfo for each
/// to \p local_variables, importing every type into \p ast_context.
/// Variables whose value object, type import, or archetype resolution fails
/// are silently skipped. 'self' is renamed to "$__lldb_injected_self".
static void CountLocals(
    SymbolContext &sc, lldb::StackFrameSP &stack_frame_sp,
    SwiftASTContext &ast_context,
    llvm::SmallVectorImpl<SwiftASTManipulator::VariableInfo> &local_variables) {
  std::set<ConstString> counted_names; // avoids shadowing
  if (!sc.block && !sc.function)
    return;
  Block *block = sc.block;
  Block *top_block = block->GetContainingInlinedBlock();
  if (!top_block)
    top_block = &sc.function->GetBlock(true);
  static ConstString s_self_name("self");
  SwiftLanguageRuntime *language_runtime = nullptr;
  // NOTE(review): 'scope' is assigned below but never read in this function
  // — looks vestigial; confirm before removing.
  ExecutionContextScope *scope = nullptr;
  if (stack_frame_sp) {
    language_runtime =
        stack_frame_sp->GetThread()->GetProcess()->GetSwiftLanguageRuntime();
    scope = stack_frame_sp.get();
  }
  // The module scoped variables are stored at the CompUnit level, so after we
  // go through the current context,
  // then we have to take one more pass through the variables in the CompUnit.
  bool handling_globals = false;
  while (true) {
    VariableList variables;
    if (!handling_globals) {
      // Pass 1..N: walk block variables from the innermost block outward.
      constexpr bool can_create = true;
      constexpr bool get_parent_variables = false;
      constexpr bool stop_if_block_is_inlined_function = true;
      block->AppendVariables(can_create, get_parent_variables,
                             stop_if_block_is_inlined_function,
                             [](Variable *) { return true; }, &variables);
    } else {
      // Final pass: the compile unit's globals.
      if (sc.comp_unit) {
        lldb::VariableListSP globals_sp = sc.comp_unit->GetVariableList(true);
        if (globals_sp)
          variables.AddVariables(globals_sp.get());
      }
    }
    for (size_t vi = 0, ve = variables.GetSize(); vi != ve; ++vi) {
      lldb::VariableSP variable_sp(variables.GetVariableAtIndex(vi));
      const ConstString &name(variable_sp->GetName());
      const char *name_cstring = variable_sp->GetName().GetCString();
      if (name.IsEmpty())
        continue;
      // An inner binding shadows any outer one of the same name.
      if (counted_names.count(name))
        continue;
      CompilerType var_type;
      if (stack_frame_sp) {
        lldb::ValueObjectSP valobj_sp =
            stack_frame_sp->GetValueObjectForFrameVariable(
                variable_sp, lldb::eNoDynamicValues);
        if (!valobj_sp || valobj_sp->GetError().Fail()) {
          // Ignore the variable if we couldn't find its corresponding value
          // object.
          // TODO if the expression tries to use an ignored variable, produce a
          // sensible error.
          continue;
        } else {
          var_type = valobj_sp->GetCompilerType();
        }
        if (var_type.IsValid() && !SwiftASTContext::IsFullyRealized(var_type)) {
          lldb::ValueObjectSP dynamic_valobj_sp =
              valobj_sp->GetDynamicValue(lldb::eDynamicDontRunTarget);
          if (!dynamic_valobj_sp || dynamic_valobj_sp->GetError().Fail()) {
            continue;
          }
        }
      }
      // No frame (or no value object type): fall back to the static type
      // from the symbol information.
      if (!var_type.IsValid()) {
        Type *var_lldb_type = variable_sp->GetType();
        if (var_lldb_type)
          var_type = var_lldb_type->GetFullCompilerType();
      }
      if (!var_type.IsValid())
        continue;
      // Only Swift-typed variables can be injected into a Swift expression.
      if (!llvm::isa<SwiftASTContext>(var_type.GetTypeSystem()))
        continue;
      Status error;
      CompilerType target_type = ast_context.ImportType(var_type, error);
      // If the import failed, give up
      if (!target_type.IsValid())
        continue;
      // Make sure to resolve all archetypes in the variable type.
      if (language_runtime && stack_frame_sp)
        target_type = language_runtime->DoArchetypeBindingForType(
            *stack_frame_sp, target_type);
      // If we couldn't fully realize the type, then we aren't going to get very
      // far making a local out of it,
      // so discard it here.
      Log *log(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_TYPES |
                                                      LIBLLDB_LOG_EXPRESSIONS));
      if (!SwiftASTContext::IsFullyRealized(target_type)) {
        if (log) {
          log->Printf("Discarding local %s because we couldn't fully realize "
                      "it, our best attempt was: %s.",
                      name_cstring,
                      target_type.GetTypeName().AsCString("<unknown>"));
        }
        continue;
      }
      SwiftASTManipulatorBase::VariableMetadataSP metadata_sp(
          new VariableMetadataVariable(variable_sp));
      const char *overridden_name = name_cstring;
      // 'self' is injected under a reserved name so the expression prologue
      // can find it.
      if (name == s_self_name) {
        overridden_name = ConstString("$__lldb_injected_self").AsCString();
        if (log) {
          swift::TypeBase *swift_type =
              (swift::TypeBase *)target_type.GetOpaqueQualType();
          if (swift_type) {
            std::string s;
            llvm::raw_string_ostream ss(s);
            swift_type->dump(ss);
            ss.flush();
            log->Printf("Adding injected self: type (%p) context(%p) is: %s",
                        swift_type, ast_context.GetASTContext(), s.c_str());
          }
        }
      }
      SwiftASTManipulator::VariableInfo variable_info(
          target_type,
          ast_context.GetASTContext()->getIdentifier(overridden_name),
          metadata_sp,
          swift::VarDecl::Specifier::Var);
      local_variables.push_back(variable_info);
      counted_names.insert(name);
    }
    if (handling_globals) {
      // Okay, now we're done...
      break;
    } else if (block == top_block) {
      // Now add the containing module block, that's what holds the module
      // globals:
      handling_globals = true;
    } else
      block = block->GetParent();
  }
}
/// For each '$'-name the expression references, look up the corresponding
/// persistent expression variable in the target's Swift persistent state
/// and, when found, append a VariableInfo for it to \p local_variables so
/// the expression can bind it.
static void ResolveSpecialNames(
    SymbolContext &sc, ExecutionContextScope &exe_scope,
    SwiftASTContext &ast_context,
    llvm::SmallVectorImpl<swift::Identifier> &special_names,
    llvm::SmallVectorImpl<SwiftASTManipulator::VariableInfo> &local_variables) {
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
  if (!sc.target_sp)
    return;
  auto *persistent_state =
      sc.target_sp->GetSwiftPersistentExpressionState(exe_scope);
  // Each distinct name is handled at most once, even if it occurs several
  // times in special_names.
  std::set<ConstString> seen;
  for (swift::Identifier &name : special_names) {
    ConstString const_name(name.str());
    if (!seen.insert(const_name).second)
      continue;
    if (log)
      log->Printf("Resolving special name %s", const_name.AsCString());
    lldb::ExpressionVariableSP persistent_var_sp =
        persistent_state->GetVariable(const_name);
    if (!persistent_var_sp)
      continue;
    CompilerType var_type = persistent_var_sp->GetCompilerType();
    if (!var_type.IsValid() ||
        !llvm::isa<SwiftASTContext>(var_type.GetTypeSystem()))
      continue;
    // Bring the variable's type into our scratch context; skip the variable
    // if that fails.
    Status error;
    CompilerType imported_type = ast_context.ImportType(var_type, error);
    if (!imported_type)
      continue;
    SwiftASTManipulatorBase::VariableMetadataSP metadata_sp(
        new VariableMetadataPersistent(persistent_var_sp));
    // Non-modifiable persistent results become 'let's; modifiable persistent
    // variables become 'var's.
    auto specifier =
        llvm::cast<SwiftExpressionVariable>(persistent_var_sp.get())
                ->GetIsModifiable()
            ? swift::VarDecl::Specifier::Var
            : swift::VarDecl::Specifier::Let;
    local_variables.push_back(SwiftASTManipulator::VariableInfo(
        imported_type, ast_context.GetASTContext()->getIdentifier(name.str()),
        metadata_sp, specifier));
  }
}
//----------------------------------------------------------------------
// Diagnostics are part of the SwiftASTContext and we must enable and
// disable colorization manually in the SwiftASTContext. We need to
// ensure that if we modify the setting that we restore it to what it
// was. This class helps us to do that without having to instrument all
// returns from a function, like in SwiftExpressionParser::Parse(...).
//----------------------------------------------------------------------
// RAII guard: sets the colorize-diagnostics flag in its constructor and
// restores the previous value in its destructor.
class SetColorize {
public:
  SetColorize(SwiftASTContext *swift_ast, bool colorize)
      : m_swift_ast(swift_ast),
        m_saved_colorize(swift_ast->SetColorizeDiagnostics(colorize)) {}
  ~SetColorize() { m_swift_ast->SetColorizeDiagnostics(m_saved_colorize); }
protected:
  SwiftASTContext *m_swift_ast;
  const bool m_saved_colorize; // value to restore on destruction
};
/// Initialize the SwiftASTContext and return the wrapped
/// swift::ASTContext when successful.
///
/// Verifies the context is usable (clang importer available, no fatal
/// errors), clears stale diagnostics, and configures the language and
/// IRGen options for debugger/REPL/playground use. Returns nullptr and
/// reports via \p diagnostic_manager on failure.
static swift::ASTContext *SetupASTContext(
    SwiftASTContext *swift_ast_context, DiagnosticManager &diagnostic_manager,
    std::function<bool()> disable_objc_runtime, bool repl, bool playground) {
  if (!swift_ast_context) {
    diagnostic_manager.PutString(
        eDiagnosticSeverityError,
        "No AST context to parse into.  Please parse with a target.\n");
    return nullptr;
  }
  // Lazily get the clang importer if we can to make sure it exists in case we
  // need it
  if (!swift_ast_context->GetClangImporter()) {
    diagnostic_manager.PutString(
        eDiagnosticSeverityError,
        "Swift expressions require OS X 10.10 / iOS 8 SDKs or later.\n");
    return nullptr;
  }
  if (swift_ast_context->HasFatalErrors()) {
    diagnostic_manager.PutString(eDiagnosticSeverityError,
                                 "The AST context is in a fatal error state.");
    return nullptr;
  }
  swift::ASTContext *ast_context = swift_ast_context->GetASTContext();
  if (!ast_context) {
    diagnostic_manager.PutString(
        eDiagnosticSeverityError,
        "Couldn't initialize the AST context.  Please check your settings.");
    return nullptr;
  }
  // Re-check: GetASTContext() above may itself have produced fatal errors.
  if (swift_ast_context->HasFatalErrors()) {
    diagnostic_manager.PutString(eDiagnosticSeverityError,
                                 "The AST context is in a fatal error state.");
    return nullptr;
  }
  // TODO find a way to get constraint-solver output sent to a stream so we can
  // log it
  // swift_ast_context->GetLanguageOptions().DebugConstraintSolver = true;
  swift_ast_context->ClearDiagnostics();
  // Make a class that will set/restore the colorize setting in the
  // SwiftASTContext for us
  // SetColorize colorize(swift_ast_context,
  //                      stream.GetFlags().Test(Stream::eANSIColor));
  swift_ast_context->GetLanguageOptions().DebuggerSupport = true;
  swift_ast_context->GetLanguageOptions().EnableDollarIdentifiers =
      true; // No longer part of debugger support, set it separately.
  swift_ast_context->GetLanguageOptions().EnableAccessControl =
      (repl || playground);
  swift_ast_context->GetLanguageOptions().EnableTargetOSChecking = false;
  if (disable_objc_runtime())
    swift_ast_context->GetLanguageOptions().EnableObjCInterop = false;
  if (repl || playground) {
    swift_ast_context->GetLanguageOptions().Playground = true;
    swift_ast_context->GetIRGenOptions().Playground = true;
  } else {
    // NOTE(review): both branches set LanguageOptions().Playground to true;
    // this else branch looks like it was meant to set false — confirm
    // against upstream before changing.
    swift_ast_context->GetLanguageOptions().Playground = true;
    swift_ast_context->GetIRGenOptions().Playground = false;
  }
  // For the expression parser and REPL we want to relax the
  // requirement that you put "try" in front of every expression that
  // might throw.
  if (repl || !playground)
    swift_ast_context->GetLanguageOptions().EnableThrowWithoutTry = true;
  swift_ast_context->GetIRGenOptions().OptMode =
      swift::OptimizationMode::NoOptimization;
  // Normally we'd like to verify, but unfortunately the verifier's
  // error mode is abort().
  swift_ast_context->GetIRGenOptions().Verify = false;
  return ast_context;
}
/// Returns the buffer_id for the expression's source code.
///
/// When debug info is requested the expression text is saved to a temp
/// file first so the debugger can later display real source; otherwise
/// (or if saving fails) an in-memory copy of the text is used.
static std::pair<unsigned, std::string>
CreateMainFile(SwiftASTContext &swift_ast_context, StringRef filename,
               StringRef text, const EvaluateExpressionOptions &options) {
  // Configure debug-info emission before choosing a source buffer.
  const bool generate_debug_info = options.GetGenerateDebugInfo();
  swift_ast_context.SetGenerateDebugInfo(generate_debug_info
                                             ? swift::IRGenDebugInfoKind::Normal
                                             : swift::IRGenDebugInfoKind::None);
  swift::IRGenOptions &ir_gen_options = swift_ast_context.GetIRGenOptions();
  if (generate_debug_info) {
    // Preferred path: an on-disk copy of the expression text.
    std::string temp_path;
    if (ExpressionSourceCode::SaveExpressionTextToTempFile(text, options,
                                                           temp_path)) {
      auto buffer_or_error = llvm::MemoryBuffer::getFile(temp_path.c_str());
      if (buffer_or_error.getError() == std::error_condition()) {
        unsigned temp_buffer_id =
            swift_ast_context.GetSourceManager().addNewSourceBuffer(
                std::move(buffer_or_error.get()));
        // Record the directory of the temp file as the debug compilation
        // dir so the debug info can locate the source.
        llvm::SmallString<256> temp_dir(temp_path);
        llvm::sys::path::remove_filename(temp_dir);
        ir_gen_options.DebugCompilationDir = temp_dir.str();
        return {temp_buffer_id, temp_path};
      }
    }
  }
  // Fallback: copy the expression text into a named in-memory buffer.
  std::unique_ptr<llvm::MemoryBuffer> text_buffer(
      llvm::MemoryBuffer::getMemBufferCopy(text, filename));
  unsigned mem_buffer_id =
      swift_ast_context.GetSourceManager().addNewSourceBuffer(
          std::move(text_buffer));
  return {mem_buffer_id, filename};
}
/// Attempt to materialize one variable.
///
/// Registers \p variable with the appropriate materializer and records
/// the struct offset it was assigned: result/error variables go through
/// the REPL materializer (in the REPL) or Materializer::AddResultVariable,
/// frame locals through AddVariable, and persistent ($-prefixed) variables
/// through AddPersistentVariable.
///
/// \return the SILVariableInfo (type, struct offset, needs-init flag) on
///         success, or llvm::None after emitting a diagnostic on failure.
static llvm::Optional<SwiftExpressionParser::SILVariableInfo>
MaterializeVariable(SwiftASTManipulatorBase::VariableInfo &variable,
                    SwiftUserExpression &user_expression,
                    Materializer &materializer,
                    SwiftASTManipulator &manipulator,
                    lldb::StackFrameWP &stack_frame_wp,
                    DiagnosticManager &diagnostic_manager, Log *log,
                    bool repl) {
  uint64_t offset = 0;
  bool needs_init = false;
  bool is_result =
      variable.MetadataIs<SwiftASTManipulatorBase::VariableMetadataResult>();
  bool is_error =
      variable.MetadataIs<SwiftASTManipulatorBase::VariableMetadataError>();
  if (is_result || is_error) {
    // Freshly created result/error slots always need initialization.
    needs_init = true;
    Status error;
    if (repl) {
      // In the REPL, void results are dropped; everything else goes
      // through the SwiftREPLMaterializer.
      if (swift::TypeBase *swift_type =
              (swift::TypeBase *)variable.GetType().GetOpaqueQualType()) {
        if (!swift_type->getCanonicalType()->isVoid()) {
          auto &repl_mat = *llvm::cast<SwiftREPLMaterializer>(&materializer);
          if (is_result)
            offset = repl_mat.AddREPLResultVariable(
                variable.GetType(), variable.GetDecl(),
                &user_expression.GetResultDelegate(), error);
          else
            offset = repl_mat.AddREPLResultVariable(
                variable.GetType(), variable.GetDecl(),
                &user_expression.GetErrorDelegate(), error);
        }
      }
    } else {
      CompilerType actual_type(variable.GetType());
      // An archetype (generic parameter) must be resolved to the concrete
      // runtime type via the Swift language runtime before materializing.
      if (Flags(actual_type.GetTypeInfo())
              .AllSet(lldb::eTypeIsSwift | lldb::eTypeIsArchetype)) {
        lldb::StackFrameSP stack_frame_sp = stack_frame_wp.lock();
        if (stack_frame_sp && stack_frame_sp->GetThread() &&
            stack_frame_sp->GetThread()->GetProcess()) {
          SwiftLanguageRuntime *swift_runtime = stack_frame_sp->GetThread()
                                                    ->GetProcess()
                                                    ->GetSwiftLanguageRuntime();
          if (swift_runtime) {
            actual_type = swift_runtime->GetConcreteType(
                stack_frame_sp.get(), actual_type.GetTypeName());
            // Fall back to the declared type if resolution failed.
            if (actual_type.IsValid())
              variable.SetType(actual_type);
            else
              actual_type = variable.GetType();
          }
        }
      }
      // Let the manipulator fix up the result type (e.g. for the
      // expression's language flags) before registering it.
      swift::Type actual_swift_type =
          swift::Type((swift::TypeBase *)actual_type.GetOpaqueQualType());
      swift::Type fixed_type = manipulator.FixupResultType(
          actual_swift_type, user_expression.GetLanguageFlags());
      if (!fixed_type.isNull()) {
        actual_type =
            CompilerType(actual_type.GetTypeSystem(), fixed_type.getPointer());
        variable.SetType(actual_type);
      }
      if (is_result)
        offset = materializer.AddResultVariable(
            actual_type, false, true, &user_expression.GetResultDelegate(),
            error);
      else
        offset = materializer.AddResultVariable(
            actual_type, false, true, &user_expression.GetErrorDelegate(),
            error);
    }
    if (!error.Success()) {
      diagnostic_manager.Printf(
          eDiagnosticSeverityError, "couldn't add %s variable to struct: %s.\n",
          is_result ? "result" : "error", error.AsCString());
      return llvm::None;
    }
    if (log)
      log->Printf("Added %s variable to struct at offset %llu",
                  is_result ? "result" : "error", (unsigned long long)offset);
  } else if (variable.MetadataIs<VariableMetadataVariable>()) {
    // An ordinary variable backed by a lldb::VariableSP.
    Status error;
    VariableMetadataVariable *variable_metadata =
        static_cast<VariableMetadataVariable *>(variable.m_metadata.get());
    offset = materializer.AddVariable(variable_metadata->m_variable_sp, error);
    if (!error.Success()) {
      diagnostic_manager.Printf(eDiagnosticSeverityError,
                                "couldn't add variable to struct: %s.\n",
                                error.AsCString());
      return llvm::None;
    }
    if (log)
      log->Printf("Added variable %s to struct at offset %llu",
                  variable_metadata->m_variable_sp->GetName().AsCString(),
                  (unsigned long long)offset);
  } else if (variable.MetadataIs<VariableMetadataPersistent>()) {
    // A persistent expression variable from a previous expression; it may
    // still need its backing storage initialized (EVSNeedsInit).
    VariableMetadataPersistent *variable_metadata =
        static_cast<VariableMetadataPersistent *>(variable.m_metadata.get());
    needs_init = llvm::cast<SwiftExpressionVariable>(
                     variable_metadata->m_persistent_variable_sp.get())
                     ->m_swift_flags &
                 SwiftExpressionVariable::EVSNeedsInit;
    Status error;
    offset = materializer.AddPersistentVariable(
        variable_metadata->m_persistent_variable_sp,
        &user_expression.GetPersistentVariableDelegate(), error);
    if (!error.Success()) {
      diagnostic_manager.Printf(eDiagnosticSeverityError,
                                "couldn't add variable to struct: %s.\n",
                                error.AsCString());
      return llvm::None;
    }
    if (log)
      log->Printf(
          "Added persistent variable %s with flags 0x%llx to "
          "struct at offset %llu",
          variable_metadata->m_persistent_variable_sp->GetName().AsCString(),
          (unsigned long long)
              variable_metadata->m_persistent_variable_sp->m_flags,
          (unsigned long long)offset);
  }
  // NOTE(review): a variable with none of the three metadata kinds falls
  // through with offset 0 / needs_init false — presumably unreachable;
  // confirm against the callers.
  return SwiftExpressionParser::SILVariableInfo(variable.GetType(), offset,
                                                needs_init);
}
namespace {
/// This error indicates that the error has already been diagnosed.
struct PropagatedError : public llvm::ErrorInfo<PropagatedError> {
  static char ID;
  void log(llvm::raw_ostream &OS) const override { OS << "Propagated"; }
  std::error_code convertToErrorCode() const override {
    return inconvertibleErrorCode();
  }
};
/// This indicates an error in the SwiftASTContext.
struct SwiftASTContextError : public llvm::ErrorInfo<SwiftASTContextError> {
  static char ID;
  void log(llvm::raw_ostream &OS) const override { OS << "SwiftASTContext"; }
  std::error_code convertToErrorCode() const override {
    return inconvertibleErrorCode();
  }
};
/// This indicates a failure while importing a Swift module the expression
/// depends on; \c Message carries the human-readable reason.
struct ModuleImportError : public llvm::ErrorInfo<ModuleImportError> {
  static char ID;
  std::string Message;
  ModuleImportError(llvm::Twine Message) : Message(Message.str()) {}
  void log(llvm::raw_ostream &OS) const override { OS << "ModuleImport"; }
  std::error_code convertToErrorCode() const override {
    return inconvertibleErrorCode();
  }
};
// Unique IDs required by the llvm::ErrorInfo RTTI machinery.
char PropagatedError::ID = 0;
char SwiftASTContextError::ID = 0;
char ModuleImportError::ID = 0;
/// This holds the result of ParseAndImport.
struct ParsedExpression {
  // Null when only the playground transform runs (no rewriting).
  std::unique_ptr<SwiftASTManipulator> code_manipulator;
  swift::ASTContext &ast_context;
  swift::ModuleDecl &module;
  LLDBNameLookup &external_lookup;
  swift::SourceFile &source_file;
  // Name of the buffer/file backing the expression source.
  std::string main_filename;
  unsigned buffer_id;
};
} // namespace
/// Attempt to parse an expression and import all the Swift modules
/// the expression and its context depend on.
///
/// Builds a fresh swift::ModuleDecl + SourceFile for the expression text,
/// parses it, runs the SwiftASTManipulator rewrites (unless in playground
/// mode), performs auto-import of dependent modules (before and after name
/// binding), and returns everything bundled in a ParsedExpression.
///
/// \return the ParsedExpression on success; on failure one of
///         PropagatedError / SwiftASTContextError / ModuleImportError /
///         StringError describing what went wrong.
static llvm::Expected<ParsedExpression>
ParseAndImport(SwiftASTContext *swift_ast_context, Expression &expr,
               SwiftExpressionParser::SILVariableMap &variable_map,
               unsigned &buffer_id, DiagnosticManager &diagnostic_manager,
               SwiftExpressionParser &swift_expr_parser,
               lldb::StackFrameWP &stack_frame_wp, SymbolContext &sc,
               ExecutionContextScope &exe_scope,
               const EvaluateExpressionOptions &options, bool repl,
               bool playground) {
  // ObjC interop is disabled when the process has no ObjC runtime loaded.
  auto should_disable_objc_runtime = [&]() {
    lldb::StackFrameSP this_frame_sp(stack_frame_wp.lock());
    if (!this_frame_sp)
      return false;
    lldb::ProcessSP process_sp(this_frame_sp->CalculateProcess());
    if (!process_sp)
      return false;
    return !process_sp->GetObjCLanguageRuntime();
  };
  swift::ASTContext *ast_context =
      SetupASTContext(swift_ast_context, diagnostic_manager,
                      should_disable_objc_runtime, repl, playground);
  if (!ast_context)
    return make_error<PropagatedError>();
  // If we are using the playground, hand import the necessary modules.
  // FIXME: We won't have to do this once the playground adds import statements
  // for the things it needs itself.
  if (playground) {
    auto *persistent_state =
        sc.target_sp->GetSwiftPersistentExpressionState(exe_scope);
    persistent_state->AddHandLoadedModule(ConstString("Swift"));
  }
  // Create the source buffer holding the expression text and a uniquely
  // named module ("__lldb_expr_<N>") to compile it into.
  std::string main_filename;
  std::tie(buffer_id, main_filename) = CreateMainFile(
      *swift_ast_context, repl ? "<REPL>" : "<EXPR>", expr.Text(), options);
  char expr_name_buf[32];
  snprintf(expr_name_buf, sizeof(expr_name_buf), "__lldb_expr_%u",
           options.GetExpressionNumber());
  auto module_id = ast_context->getIdentifier(expr_name_buf);
  auto &module = *swift::ModuleDecl::create(module_id, *ast_context);
  const auto implicit_import_kind =
      swift::SourceFile::ImplicitModuleImportKind::Stdlib;
  auto &invocation = swift_ast_context->GetCompilerInvocation();
  invocation.getFrontendOptions().ModuleName = expr_name_buf;
  invocation.getIRGenOptions().ModuleName = expr_name_buf;
  // REPL/playground input is a "main" file (top-level code allowed);
  // ordinary expressions are library files.
  swift::SourceFileKind source_file_kind = swift::SourceFileKind::Library;
  if (playground || repl) {
    source_file_kind = swift::SourceFileKind::Main;
  }
  swift::SourceFile *source_file = new (*ast_context) swift::SourceFile(
      module, source_file_kind, buffer_id, implicit_import_kind,
      /*Keep tokens*/ false);
  module.addFile(*source_file);
  bool done = false;
  auto *external_lookup = new LLDBNameLookup(swift_expr_parser, *source_file,
                                             variable_map, sc, exe_scope);
  // FIXME: This call is here just so that the we keep the
  // DebuggerClients alive as long as the Module we are not inserting
  // them in.
  swift_ast_context->AddDebuggerClient(external_lookup);
  swift::PersistentParserState persistent_state;
  // Parse the whole buffer; bail out on the first batch of errors.
  while (!done) {
    swift::parseIntoSourceFile(*source_file, buffer_id, &done, nullptr,
                               &persistent_state);
    if (swift_ast_context->HasErrors())
      return make_error<SwiftASTContextError>();
  }
  // This currently crashes with Assertion failed: (BufferID != -1), function
  // findBufferContainingLoc, file
  // llvm/tools/swift/include/swift/Basic/SourceManager.h, line 92.
  // if (log)
  // {
  //     std::string s;
  //     llvm::raw_string_ostream ss(s);
  //     source_file->dump(ss);
  //     ss.flush();
  //
  //     log->Printf("Source file after parsing:");
  //     log->PutCString(s.c_str());
  // }
  if (!done)
    return make_error<llvm::StringError>(
        "Parse did not consume the whole expression.",
        inconvertibleErrorCode());
  // The manipulator rewrites the expression (e.g. wraps the result); it is
  // skipped only for pure playground mode.
  std::unique_ptr<SwiftASTManipulator> code_manipulator;
  if (repl || !playground) {
    code_manipulator =
        llvm::make_unique<SwiftASTManipulator>(*source_file, repl);
    code_manipulator->RewriteResult();
  }
  // First auto-import pass (before name binding).
  Status auto_import_error;
  if (!PerformAutoImport(*swift_ast_context, sc, exe_scope, stack_frame_wp,
                         *source_file, false, auto_import_error))
    return make_error<ModuleImportError>(llvm::Twine("in auto-import:\n") +
                                         auto_import_error.AsCString());
  // Swift Modules that rely on shared libraries (not frameworks)
  // don't record the link information in the swiftmodule file, so we
  // can't really make them work without outside information.
  // However, in the REPL you can added -L & -l options to the initial
  // compiler startup, and we should dlopen anything that's been
  // stuffed on there and hope it will be useful later on.
  if (repl) {
    lldb::StackFrameSP this_frame_sp(stack_frame_wp.lock());
    if (this_frame_sp) {
      lldb::ProcessSP process_sp(this_frame_sp->CalculateProcess());
      if (process_sp) {
        Status error;
        swift_ast_context->LoadExtraDylibs(*process_sp.get(), error);
      }
    }
  }
  // For ordinary expressions, register frame locals and magic names so
  // that lookups inside the expression resolve.
  if (!playground && !repl) {
    lldb::StackFrameSP stack_frame_sp = stack_frame_wp.lock();
    bool local_context_is_swift = true;
    if (sc.block) {
      Function *function = sc.block->CalculateSymbolContextFunction();
      if (function && function->GetLanguage() != lldb::eLanguageTypeSwift)
        local_context_is_swift = false;
    }
    llvm::SmallVector<SwiftASTManipulator::VariableInfo, 5> local_variables;
    if (local_context_is_swift) {
      // code_manipulator is non-null here: it is created above whenever
      // (repl || !playground) holds.
      AddRequiredAliases(sc.block, stack_frame_sp, *swift_ast_context,
                         *code_manipulator, expr.GetSwiftGenericInfo());
      // Register all local variables so that lookups to them resolve.
      CountLocals(sc, stack_frame_sp, *swift_ast_context, local_variables);
    }
    // Register all magic variables.
    llvm::SmallVector<swift::Identifier, 2> special_names;
    llvm::StringRef persistent_var_prefix;
    if (!repl)
      persistent_var_prefix = "$";
    code_manipulator->FindSpecialNames(special_names, persistent_var_prefix);
    ResolveSpecialNames(sc, exe_scope, *swift_ast_context, special_names,
                        local_variables);
    code_manipulator->AddExternalVariables(local_variables);
    // This currently crashes with Assertion failed: (BufferID != -1), function
    // findBufferContainingLoc, file
    // llvm/tools/swift/include/swift/Basic/SourceManager.h, line 92.
    // if (log)
    // {
    //     std::string s;
    //     llvm::raw_string_ostream ss(s);
    //     source_file->dump(ss);
    //     ss.flush();
    //
    //     log->Printf("Source file after code manipulation:");
    //     log->PutCString(s.c_str());
    // }
    stack_frame_sp.reset();
  }
  swift::performNameBinding(*source_file);
  if (swift_ast_context->HasErrors())
    return make_error<SwiftASTContextError>();
  // Do the auto-importing after Name Binding, that's when the Imports for the
  // source file are figured out.
  {
    std::lock_guard<std::recursive_mutex> global_context_locker(
        IRExecutionUnit::GetLLVMGlobalContextMutex());
    Status auto_import_error;
    if (!PerformAutoImport(*swift_ast_context, sc, exe_scope, stack_frame_wp,
                           *source_file, true, auto_import_error)) {
      return make_error<ModuleImportError>(llvm::Twine("in auto-import:\n") +
                                           auto_import_error.AsCString());
    }
  }
  ParsedExpression result = {std::move(code_manipulator),
                             *ast_context,
                             module,
                             *external_lookup,
                             *source_file,
                             std::move(main_filename)};
  return std::move(result);
}
/// Parse the expression: build and type-check a Swift source file for it,
/// run SILGen/SIL passes and IRGen, and leave the LLVM module in m_module.
///
/// \return 0 on success, 1 on an unrecoverable error, and 2 to request a
///         single retry with a fresh per-module scratch SwiftASTContext.
unsigned SwiftExpressionParser::Parse(DiagnosticManager &diagnostic_manager,
                                      uint32_t first_line, uint32_t last_line,
                                      uint32_t line_offset) {
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
  SwiftExpressionParser::SILVariableMap variable_map;
  // Helper function to diagnose errors in m_swift_ast_context.
  unsigned buffer_id = UINT32_MAX;
  auto DiagnoseSwiftASTContextError = [&]() {
    assert(m_swift_ast_context->HasErrors() && "error expected");
    m_swift_ast_context->PrintDiagnostics(diagnostic_manager, buffer_id,
                                          first_line, last_line, line_offset);
  };
  // In the case of playgrounds, we turn all rewriting functionality off.
  const bool repl = m_options.GetREPLEnabled();
  const bool playground = m_options.GetPlaygroundTransformEnabled();
  // BUGFIX: this used to "return false" (i.e. 0), which callers interpret
  // as success even though no module was produced. Report a hard error.
  if (!m_exe_scope)
    return 1;
  // Parse the expression and import all necessary Swift modules.
  auto parsed_expr = ParseAndImport(
      m_swift_ast_context, m_expr, variable_map, buffer_id, diagnostic_manager,
      *this, m_stack_frame_wp, m_sc, *m_exe_scope, m_options, repl, playground);
  if (!parsed_expr) {
    bool retry = false;
    handleAllErrors(parsed_expr.takeError(),
                    [&](const ModuleImportError &MIE) {
                      if (m_sc.target_sp->UseScratchTypesystemPerModule())
                        // Already on backup power.
                        diagnostic_manager.PutString(eDiagnosticSeverityError,
                                                     MIE.Message);
                      else
                        // Discard the shared scratch context and retry.
                        retry = true;
                    },
                    [&](const SwiftASTContextError &SACE) {
                      DiagnoseSwiftASTContextError();
                    },
                    [&](const StringError &SE) {
                      diagnostic_manager.PutString(eDiagnosticSeverityError,
                                                   SE.getMessage());
                    },
                    [](const PropagatedError &P) {});
    // Unrecoverable error?
    if (!retry)
      return 1;
    // Signal that we want to retry the expression exactly once with
    // a fresh SwiftASTContext initialized with the flags from the
    // current lldb::Module / Swift dylib to avoid header search
    // mismatches.
    m_sc.target_sp->SetUseScratchTypesystemPerModule(true);
    return 2;
  }
  // Not persistent because we're building source files one at a time.
  swift::TopLevelContext top_level_context;
  swift::OptionSet<swift::TypeCheckingFlags> type_checking_options;
  swift::performTypeChecking(parsed_expr->source_file, top_level_context,
                             type_checking_options);
  if (m_swift_ast_context->HasErrors()) {
    DiagnoseSwiftASTContextError();
    return 1;
  }
  if (log) {
    std::string s;
    llvm::raw_string_ostream ss(s);
    parsed_expr->source_file.dump(ss);
    ss.flush();
    log->Printf("Source file after type checking:");
    log->PutCString(s.c_str());
  }
  if (repl) {
    parsed_expr->code_manipulator->MakeDeclarationsPublic();
  }
  Status error;
  if (!playground) {
    parsed_expr->code_manipulator->FixupResultAfterTypeChecking(error);
    if (!error.Success()) {
      diagnostic_manager.PutString(eDiagnosticSeverityError, error.AsCString());
      return 1;
    }
  } else {
    swift::performPlaygroundTransform(parsed_expr->source_file, true);
    swift::typeCheckExternalDefinitions(parsed_expr->source_file);
  }
  // I think we now have to do the name binding and type checking again, but
  // there should be only the result
  // variable to bind up at this point.
  if (log) {
    std::string s;
    llvm::raw_string_ostream ss(s);
    parsed_expr->source_file.dump(ss);
    ss.flush();
    log->Printf("Source file after FixupResult:");
    log->PutCString(s.c_str());
  }
  // Allow variables to be re-used from previous REPL statements.
  if (m_sc.target_sp && (repl || !playground)) {
    // Do this first so we don't pollute the persistent variable
    // namespace.
    if (!parsed_expr->code_manipulator->CheckPatternBindings()) {
      DiagnoseSwiftASTContextError();
      return 1;
    }
    Status error;
    SwiftASTContext *scratch_ast_context = m_swift_ast_context;
    if (scratch_ast_context) {
      auto *persistent_state =
          m_sc.target_sp->GetSwiftPersistentExpressionState(*m_exe_scope);
      llvm::SmallVector<size_t, 1> declaration_indexes;
      parsed_expr->code_manipulator->FindVariableDeclarations(
          declaration_indexes, repl);
      // Promote each declared variable to a persistent expression variable
      // so later expressions/REPL statements can reference it.
      for (size_t declaration_index : declaration_indexes) {
        SwiftASTManipulator::VariableInfo &variable_info =
            parsed_expr->code_manipulator->GetVariableInfo()[declaration_index];
        CompilerType imported_type =
            ImportType(*scratch_ast_context, variable_info.GetType());
        if (imported_type) {
          lldb::ExpressionVariableSP persistent_variable =
              persistent_state->AddNewlyConstructedVariable(
                  new SwiftExpressionVariable(
                      m_sc.target_sp.get(),
                      ConstString(variable_info.GetName().str()), imported_type,
                      m_sc.target_sp->GetArchitecture().GetByteOrder(),
                      m_sc.target_sp->GetArchitecture().GetAddressByteSize()));
          if (repl) {
            // REPL variables live in the target program itself.
            persistent_variable->m_flags |= ExpressionVariable::EVKeepInTarget;
            persistent_variable->m_flags |=
                ExpressionVariable::EVIsProgramReference;
          } else {
            // Expression variables need allocated storage and a first init.
            persistent_variable->m_flags |=
                ExpressionVariable::EVNeedsAllocation;
            persistent_variable->m_flags |= ExpressionVariable::EVKeepInTarget;
            llvm::cast<SwiftExpressionVariable>(persistent_variable.get())
                ->m_swift_flags |= SwiftExpressionVariable::EVSNeedsInit;
          }
          swift::VarDecl *decl = variable_info.GetDecl();
          if (decl) {
            if (decl->isLet()) {
              llvm::cast<SwiftExpressionVariable>(persistent_variable.get())
                  ->SetIsModifiable(false);
            }
            if (decl->getStorageKind() ==
                swift::VarDecl::StorageKindTy::Computed) {
              llvm::cast<SwiftExpressionVariable>(persistent_variable.get())
                  ->SetIsComputed(true);
            }
          }
          variable_info.m_metadata.reset(
              new VariableMetadataPersistent(persistent_variable));
          persistent_state->RegisterSwiftPersistentDecl(decl);
        }
      }
      if (repl) {
        // Non-variable declarations (funcs, types, ...) also persist in
        // the REPL.
        llvm::SmallVector<swift::ValueDecl *, 1> non_variables;
        parsed_expr->code_manipulator->FindNonVariableDeclarations(
            non_variables);
        for (swift::ValueDecl *decl : non_variables) {
          persistent_state->RegisterSwiftPersistentDecl(decl);
        }
      }
    }
  }
  if (!playground && !repl) {
    parsed_expr->code_manipulator->FixCaptures();
    // This currently crashes with Assertion failed: (BufferID != -1),
    // function findBufferContainingLoc, file
    // llvm/tools/swift/include/swift/Basic/SourceManager.h, line 92.
    // if (log)
    // {
    //     std::string s;
    //     llvm::raw_string_ostream ss(s);
    //     parsed_expr->source_file.dump(ss);
    //     ss.flush();
    //
    //     log->Printf("Source file after capture fixing:");
    //     log->PutCString(s.c_str());
    // }
    if (log) {
      log->Printf("Variables:");
      for (const SwiftASTManipulatorBase::VariableInfo &variable :
           parsed_expr->code_manipulator->GetVariableInfo()) {
        StreamString ss;
        variable.Print(ss);
        log->Printf("  %s", ss.GetData());
      }
    }
  }
  // Materialize every variable the expression references so the JITed code
  // can read/write it via the argument struct.
  if (repl || !playground)
    if (auto *materializer = m_expr.GetMaterializer())
      for (auto &variable : parsed_expr->code_manipulator->GetVariableInfo()) {
        auto &swift_expr = *static_cast<SwiftUserExpression *>(&m_expr);
        auto var_info = MaterializeVariable(
            variable, swift_expr, *materializer, *parsed_expr->code_manipulator,
            m_stack_frame_wp, diagnostic_manager, log, repl);
        if (!var_info)
          return 1;
        const char *name = ConstString(variable.GetName().get()).GetCString();
        variable_map[name] = *var_info;
      }
  std::unique_ptr<swift::SILModule> sil_module(swift::performSILGeneration(
      parsed_expr->source_file, m_swift_ast_context->GetSILOptions()));
  if (log) {
    std::string s;
    llvm::raw_string_ostream ss(s);
    const bool verbose = false;
    sil_module->print(ss, verbose, &parsed_expr->module);
    ss.flush();
    log->Printf("SIL module before linking:");
    log->PutCString(s.c_str());
  }
  swift::performSILLinking(sil_module.get());
  if (m_swift_ast_context->HasErrors()) {
    DiagnoseSwiftASTContextError();
    return 1;
  }
  if (log) {
    std::string s;
    llvm::raw_string_ostream ss(s);
    const bool verbose = false;
    sil_module->print(ss, verbose, &parsed_expr->module);
    ss.flush();
    log->Printf("Generated SIL module:");
    log->PutCString(s.c_str());
  }
  runSILDiagnosticPasses(*sil_module);
  if (log) {
    std::string s;
    llvm::raw_string_ostream ss(s);
    const bool verbose = false;
    sil_module->print(ss, verbose, &parsed_expr->module);
    ss.flush();
    log->Printf("SIL module after diagnostic passes:");
    log->PutCString(s.c_str());
  }
  if (m_swift_ast_context->HasErrors()) {
    DiagnoseSwiftASTContextError();
    return 1;
  }
  // IRGen touches the global LLVM context, so serialize access to it.
  {
    std::lock_guard<std::recursive_mutex> global_context_locker(
        IRExecutionUnit::GetLLVMGlobalContextMutex());
    m_module = swift::performIRGeneration(
        m_swift_ast_context->GetIRGenOptions(), &parsed_expr->module,
        std::move(sil_module), "lldb_module",
        swift::PrimarySpecificPaths("", parsed_expr->main_filename),
        SwiftASTContext::GetGlobalLLVMContext(), llvm::ArrayRef<std::string>());
  }
  if (m_swift_ast_context->HasErrors()) {
    DiagnoseSwiftASTContextError();
    return 1;
  }
  if (!m_module) {
    diagnostic_manager.PutString(
        eDiagnosticSeverityError,
        "Couldn't IRGen expression, no additional error");
    return 1;
  }
  if (log) {
    std::string s;
    llvm::raw_string_ostream ss(s);
    m_module->print(ss, NULL);
    ss.flush();
    log->Printf("Generated IR module:");
    log->PutCString(s.c_str());
  }
  {
    std::lock_guard<std::recursive_mutex> global_context_locker(
        IRExecutionUnit::GetLLVMGlobalContextMutex());
    LLVMVerifyModule((LLVMOpaqueModule *)m_module.get(), LLVMReturnStatusAction,
                     nullptr);
  }
  if (m_swift_ast_context->HasErrors())
    return 1;
  // The Parse succeeded!  Now put this module into the context's
  // list of loaded modules, and copy the Decls that were globalized
  // as part of the parse from the staging area in the external
  // lookup object into the SwiftPersistentExpressionState.
  swift::ModuleDecl *module = &parsed_expr->module;
  parsed_expr->ast_context.LoadedModules.insert({module->getName(), module});
  if (m_swift_ast_context)
    m_swift_ast_context->CacheModule(module);
  if (m_sc.target_sp) {
    auto *persistent_state =
        m_sc.target_sp->GetSwiftPersistentExpressionState(*m_exe_scope);
    persistent_state->CopyInSwiftPersistentDecls(
        parsed_expr->external_lookup.GetStagedDecls());
  }
  return 0;
}
/// Search \p module for a function whose name matches \p orig_name.
///
/// With \p exact the name must match verbatim; otherwise a substring match
/// on the mangled name is accepted, and failing that the name is demangled
/// and its identifier nodes are searched (compressed manglings may not
/// contain the plain name).  On success the mangled name is stored in
/// \p mangled_name.
static bool FindFunctionInModule(ConstString &mangled_name,
                                 llvm::Module *module, const char *orig_name,
                                 bool exact) {
  swift::Demangle::Context demangle_ctx;
  for (llvm::Function &func : module->getFunctionList()) {
    const std::string func_name = func.getName().str();
    if (exact) {
      if (func_name == orig_name) {
        mangled_name.SetCString(func_name.c_str());
        return true;
      }
      continue;
    }
    // Inexact: first try a plain substring match on the mangled name.
    if (func_name.find(orig_name) != std::string::npos) {
      mangled_name.SetCString(func_name.c_str());
      return true;
    }
    // The new demangling is cannier about compression, so the name may
    // not be in the mangled name plain. Let's demangle it and see if we
    // can find it in the demangled nodes.
    demangle_ctx.clear();
    swift::Demangle::NodePointer node =
        demangle_ctx.demangleSymbolAsNode(func.getName());
    if (!node)
      continue;
    if (node->getKind() != swift::Demangle::Node::Kind::Global)
      continue;
    if (node->getNumChildren() != 1)
      continue;
    node = node->getFirstChild();
    if (node->getKind() != swift::Demangle::Node::Kind::Function)
      continue;
    // Scan the function node's identifier children for the name.
    const size_t child_count = node->getNumChildren();
    for (size_t child_idx = 0; child_idx < child_count; ++child_idx) {
      swift::Demangle::NodePointer child = node->getChild(child_idx);
      if (child->getKind() != swift::Demangle::Node::Kind::Identifier)
        continue;
      if (!child->hasText())
        continue;
      if (child->getText().contains(orig_name)) {
        mangled_name.SetCString(func_name.c_str());
        return true;
      }
    }
  }
  return false;
}
/// Prepare the parsed module for execution: locate the entry-point
/// function, hand the LLVM module off to a new IRExecutionUnit, and JIT it.
///
/// On success, \p func_addr / \p func_end delimit the JITed entry point
/// and \p execution_unit_sp owns the compiled code.  \p can_interpret is
/// never set here (Swift expressions are always JITed).
Status SwiftExpressionParser::PrepareForExecution(
    lldb::addr_t &func_addr, lldb::addr_t &func_end,
    lldb::IRExecutionUnitSP &execution_unit_sp, ExecutionContext &exe_ctx,
    bool &can_interpret, ExecutionPolicy execution_policy) {
  Status err;
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
  if (!m_module) {
    err.SetErrorString("Can't prepare a NULL module for execution");
    return err;
  }
  // REPL/playground code is wrapped in an exact "main" function;
  // ordinary expressions match any name containing "$__lldb_expr".
  const char *orig_name = nullptr;
  bool exact = false;
  if (m_options.GetPlaygroundTransformEnabled() || m_options.GetREPLEnabled()) {
    orig_name = "main";
    exact = true;
  } else {
    orig_name = "$__lldb_expr";
  }
  ConstString function_name;
  if (!FindFunctionInModule(function_name, m_module.get(), orig_name, exact)) {
    err.SetErrorToGenericError();
    err.SetErrorStringWithFormat("Couldn't find %s() in the module", orig_name);
    return err;
  } else {
    if (log)
      log->Printf("Found function %s for %s", function_name.AsCString(),
                  "$__lldb_expr");
  }
  // Retrieve an appropriate symbol context.
  SymbolContext sc;
  if (lldb::StackFrameSP frame_sp = exe_ctx.GetFrameSP()) {
    sc = frame_sp->GetSymbolContext(lldb::eSymbolContextEverything);
  } else if (lldb::TargetSP target_sp = exe_ctx.GetTargetSP()) {
    sc.target_sp = target_sp;
  }
  std::vector<std::string> features;
  std::unique_ptr<llvm::LLVMContext> llvm_context_up;
  // Ownership of m_module is transferred to the execution unit here.
  m_execution_unit_sp.reset(
      new IRExecutionUnit(llvm_context_up,
                          m_module, // handed off here
                          function_name, exe_ctx.GetTargetSP(), sc, features));
  // TODO figure out some way to work ClangExpressionDeclMap into this or do the
  // equivalent
  // for Swift
  m_execution_unit_sp->GetRunnableInfo(err, func_addr, func_end);
  // The caller takes over the execution unit; drop our reference.
  execution_unit_sp = m_execution_unit_sp;
  m_execution_unit_sp.reset();
  return err;
}
/// Apply the fix-its attached to the diagnostics in \p diagnostic_manager
/// to the expression text and store the rewritten expression back into the
/// manager.
///
/// \return true if a fixed expression was produced; false if there were
///         no fix-its or one of them could not be applied safely.
bool SwiftExpressionParser::RewriteExpression(
    DiagnosticManager &diagnostic_manager) {
  // There isn't a Swift equivalent to clang::Rewriter, so we'll just use
  // that...
  if (!m_swift_ast_context)
    return false;
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
  swift::SourceManager &source_manager =
      m_swift_ast_context->GetSourceManager();
  const DiagnosticList &diagnostics = diagnostic_manager.Diagnostics();
  size_t num_diags = diagnostics.size();
  if (num_diags == 0)
    return false;
  clang::RewriteBuffer rewrite_buf;
  llvm::StringRef text_ref(m_expr.Text());
  rewrite_buf.Initialize(text_ref);
  for (const Diagnostic *diag : diagnostic_manager.Diagnostics()) {
    const SwiftDiagnostic *diagnostic = llvm::dyn_cast<SwiftDiagnostic>(diag);
    if (!(diagnostic && diagnostic->HasFixIts()))
      continue;
    const SwiftDiagnostic::FixItList &fixits = diagnostic->FixIts();
    std::vector<swift::CharSourceRange> source_ranges;
    for (const swift::DiagnosticInfo::FixIt &fixit : fixits) {
      const swift::CharSourceRange &range = fixit.getRange();
      swift::SourceLoc start_loc = range.getStart();
      if (!start_loc.isValid()) {
        // getLocOffsetInBuffer will assert if you pass it an invalid location,
        // so we have to check that first.
        if (log)
          log->Printf(
              "SwiftExpressionParser::RewriteExpression: ignoring fixit since "
              "it contains an invalid source location: %s.",
              range.str().str().c_str());
        return false;
      }
      // ReplaceText can't handle replacing the same source range more than
      // once, so we have to check that
      // before we proceed:
      if (std::find(source_ranges.begin(), source_ranges.end(), range) !=
          source_ranges.end()) {
        if (log)
          log->Printf(
              "SwiftExpressionParser::RewriteExpression: ignoring fix-it since "
              "source range appears twice: %s.\n",
              range.str().str().c_str());
        return false;
      } else
        source_ranges.push_back(range);
      // ReplaceText will either assert or crash if the start_loc isn't inside
      // the buffer it is said to
      // reside in. That shouldn't happen, but it doesn't hurt to check before
      // we call ReplaceText.
      auto *Buffer = source_manager.getLLVMSourceMgr().getMemoryBuffer(
          diagnostic->GetBufferID());
      if (!(start_loc.getOpaquePointerValue() >= Buffer->getBuffer().begin() &&
            start_loc.getOpaquePointerValue() <= Buffer->getBuffer().end())) {
        if (log)
          log->Printf(
              "SwiftExpressionParser::RewriteExpression: ignoring fixit since "
              "it contains a source location not in the specified buffer: %s.",
              range.str().str().c_str());
        // BUGFIX: previously this fell through and called ReplaceText
        // anyway, defeating the check above and risking the very
        // assert/crash it guards against. Bail out like the other checks.
        return false;
      }
      unsigned offset = source_manager.getLocOffsetInBuffer(
          range.getStart(), diagnostic->GetBufferID());
      rewrite_buf.ReplaceText(offset, range.getByteLength(), fixit.getText());
    }
  }
  std::string fixed_expression;
  llvm::raw_string_ostream out_stream(fixed_expression);
  rewrite_buf.write(out_stream);
  out_stream.flush();
  diagnostic_manager.SetFixedExpression(fixed_expression);
  return true;
}
| 1 | 16,159 | I was actually looking at this code over the weekend and wondering whether we were trying to link here. I assume it's been cargo culted from something else in the compiler. | apple-swift-lldb | cpp |
@@ -0,0 +1,14 @@
+<div class="form-group">
+ <%= f.label _('Name'), for: :name, class: "control-label" %>
+ <%= f.text_field :name, as: :string, class: "form-control", 'data-toggle': 'tooltip', title: _('Add an appropriate name for your guidance group. This name will be used to tell the end user where the guidance has come from. It will be appended to text identifying the theme e.g. "[guidance group name]: guidance on data sharing" so we suggest you just use the institution or department name.'), 'aria-required': true %>
+</div>
+
+<div class="checkbox">
+ <%= f.label :published, raw("#{f.check_box :published, 'data-toggle': 'tooltip', title: _('Check this box when you are ready for guidance associated with this group to appear on user\'s plans.')} #{_('Published')}") %>
+</div>
+
+<div class="checkbox">
+ <%= f.label :optional_subset, raw("#{f.check_box :optional_subset, 'data-toggle': 'tooltip', title: _('If the guidance is only meant for a subset of users e.g. those in a specific college or institute, check this box. Users will be able to select to display this subset guidance when answering questions in the \'create plan\' wizard.')} #{_('Optional Subset (e.g. School/Department)')}") %>
+</div>
+
+<%= f.submit _('Save'), class: 'btn btn-primary' %> | 1 | 1 | 16,965 | nice extraction of the form for being used at new/edit. We should have more of these | DMPRoadmap-roadmap | rb |
|
@@ -37,8 +37,8 @@ using namespace RDKit;
* Version 2 taken from J. Chem. Inf. Model. 56, 1 (2016)
*
* torsion-angle potential form:
- * V = V1*(1 + s1*cos(1x)) + V2*(1 + s2*cos(2x)) + V3*(1 + s3*cos(1x))
- * + V4*(1 + s4*cos(1x)) + V5*(1 + s5*cos(1x)) + V6*(1 + s6*cos(1x))
+ * V = V1*(1 + s1*cos(1x)) + V2*(1 + s2*cos(2x)) + V3*(1 + s3*cos(3x))
+ * + V4*(1 + s4*cos(4x)) + V5*(1 + s5*cos(5x)) + V6*(1 + s6*cos(6x))
*
* format: [SMARTS, s1, V1, s2, V2, s3, V3, s4, V4, s5, V5, s6, V6]
*/ | 1 | //
// Copyright (C) 2017 Sereina Riniker
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include "TorsionPreferences.h"
#include <GraphMol/RDKitBase.h>
#include <Geometry/Utils.h>
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <GraphMol/Substruct/SubstructMatch.h>
#include <RDGeneral/utils.h>
#include <RDGeneral/RDLog.h>
#include <RDGeneral/Exceptions.h>
#include <boost/dynamic_bitset.hpp>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <RDGeneral/StreamOps.h>
#include <boost/lexical_cast.hpp>
#include <boost/tokenizer.hpp>
typedef boost::tokenizer<boost::char_separator<char>> tokenizer;
#include <boost/flyweight.hpp>
#include <boost/flyweight/key_value.hpp>
#include <boost/flyweight/no_tracking.hpp>
namespace ForceFields {
namespace CrystalFF {
using namespace RDKit;
/* SMARTS patterns for experimental torsion angle preferences
* Version 1 taken from J. Med. Chem. 56, 1026-2028 (2013)
* Version 2 taken from J. Chem. Inf. Model. 56, 1 (2016)
*
* torsion-angle potential form:
* V = V1*(1 + s1*cos(1x)) + V2*(1 + s2*cos(2x)) + V3*(1 + s3*cos(1x))
* + V4*(1 + s4*cos(1x)) + V5*(1 + s5*cos(1x)) + V6*(1 + s6*cos(1x))
*
* format: [SMARTS, s1, V1, s2, V2, s3, V3, s4, V4, s5, V5, s6, V6]
*/
#include "torsionPreferences_v1.in"
#include "torsionPreferences_v2.in"
#include "torsionPreferences_smallrings.in"
#include "torsionPreferences_macrocycles.in"
//! A structure used to the experimental torsion patterns
struct ExpTorsionAngle {
std::string smarts;
std::vector<double> V;
std::vector<int> signs;
boost::shared_ptr<const ROMol> dp_pattern;
unsigned int idx[4];
};
// class to store the experimental torsion angles
class ExpTorsionAngleCollection {
public:
typedef std::vector<ExpTorsionAngle> ParamsVect;
static const ExpTorsionAngleCollection *getParams(
unsigned int version, bool useSmallRingTorsions, bool useMacrocycleTorsions, const std::string ¶mData = "");
ParamsVect::const_iterator begin() const { return d_params.begin(); };
ParamsVect::const_iterator end() const { return d_params.end(); };
ExpTorsionAngleCollection(const std::string ¶mData);
private:
ParamsVect d_params; //!< the parameters
};
typedef boost::flyweight<
boost::flyweights::key_value<std::string, ExpTorsionAngleCollection>,
boost::flyweights::no_tracking>
param_flyweight;
const ExpTorsionAngleCollection *ExpTorsionAngleCollection::getParams(
unsigned int version, bool useSmallRingTorsions, bool useMacrocycleTorsions, const std::string ¶mData) {
std::string params;
if (paramData == "") {
switch (version) {
case 1:
params = torsionPreferencesV1;
break;
case 2:
params = torsionPreferencesV2;
break;
default:
throw ValueErrorException("ETversion must be 1 or 2.");
}
} else {
params = paramData;
}
if (useSmallRingTorsions)
params += torsionPreferencesSmallRings;
if (useMacrocycleTorsions)
params += torsionPreferencesMacrocycles;
const ExpTorsionAngleCollection *res = &(param_flyweight(params).get());
return res;
}
ExpTorsionAngleCollection::ExpTorsionAngleCollection(
const std::string ¶mData) {
boost::char_separator<char> tabSep(" ", "", boost::drop_empty_tokens);
std::istringstream inStream(paramData);
std::string inLine = RDKit::getLine(inStream);
while (!inStream.eof()) {
if (inLine[0] != '#') {
ExpTorsionAngle angle;
tokenizer tokens(inLine, tabSep);
tokenizer::iterator token = tokens.begin();
angle.smarts = *token;
++token;
for (unsigned int i = 0; i < 12; i += 2) {
angle.signs.push_back(boost::lexical_cast<int>(*token));
++token;
angle.V.push_back(boost::lexical_cast<double>(*token));
++token;
}
angle.dp_pattern =
boost::shared_ptr<const ROMol>(SmartsToMol(angle.smarts));
// get the atom indices for atom 1, 2, 3, 4 in the pattern
for (unsigned int i = 0; i < (angle.dp_pattern.get())->getNumAtoms();
++i) {
Atom const *atom = (angle.dp_pattern.get())->getAtomWithIdx(i);
int num;
if (atom->getPropIfPresent("molAtomMapNumber", num)) {
if (num > 0 && num < 5) {
angle.idx[num - 1] = i;
}
}
}
d_params.push_back(angle);
}
inLine = RDKit::getLine(inStream);
} // while loop
// std::cerr << "Exp. torsion angles = " << d_params.size() << " "
// << d_params[d_params.size()-1].smarts << std::endl;
}
void getExperimentalTorsions(const RDKit::ROMol &mol, CrystalFFDetails &details,
bool useExpTorsions,
bool useSmallRingTorsions,bool useMacrocycleTorsions,
bool useBasicKnowledge,
unsigned int version, bool verbose) {
unsigned int nb = mol.getNumBonds();
unsigned int na = mol.getNumAtoms();
if (!na) {
throw ValueErrorException("molecule has no atoms");
}
// check that vectors are empty
details.expTorsionAtoms.clear();
details.expTorsionAngles.clear();
details.improperAtoms.clear();
unsigned int aid1, aid2, aid3, aid4;
unsigned int bid2;
// exclude bonds in bridged ring systems
boost::dynamic_bitset<> excludedBonds(nb);
const RingInfo *rinfo = mol.getRingInfo();
const VECT_INT_VECT &bondRings = rinfo->bondRings();
VECT_INT_VECT_CI rii, rjj;
for (rii = bondRings.begin(); rii != bondRings.end(); ++rii) {
boost::dynamic_bitset<> rs1(nb); // bitset for ring 1
for (unsigned int i = 0; i < rii->size(); i++) {
rs1[(*rii)[i]] = 1;
}
for (rjj = rii+1; rjj != bondRings.end(); ++rjj) {
unsigned int nInCommon = 0;
for (auto rjj_i : *rjj) {
if (rs1[rjj_i]) {
++nInCommon;
if (nInCommon > 1) {
break;
}
}
}
if (nInCommon > 1) { // more than one bond in common
for (unsigned int i = 0; i < rii->size(); i++) {
excludedBonds[(*rii)[i]] = 1; // exclude all bonds of ring 1
}
for (unsigned int i = 0; i < rjj->size(); i++) {
excludedBonds[(*rjj)[i]] = 1; // exclude all bonds of ring 2
}
}
}
}
boost::dynamic_bitset<> doneBonds(nb);
if (useExpTorsions) {
// we set the torsion angles with experimental data
const ExpTorsionAngleCollection *params =
ExpTorsionAngleCollection::getParams(version, useSmallRingTorsions, useMacrocycleTorsions);
// loop over patterns
for (const auto ¶m : *params) {
std::vector<MatchVectType> matches;
SubstructMatch(mol, *(param.dp_pattern.get()), matches, false, true);
// loop over matches
for (std::vector<MatchVectType>::const_iterator matchIt = matches.begin();
matchIt != matches.end(); ++matchIt) {
// get bond indices
aid1 = (*matchIt)[param.idx[0]].second;
aid2 = (*matchIt)[param.idx[1]].second;
aid3 = (*matchIt)[param.idx[2]].second;
aid4 = (*matchIt)[param.idx[3]].second;
// FIX: check if bond is NULL
bid2 = mol.getBondBetweenAtoms(aid2, aid3)->getIdx();
// check that a bond is part of maximum one ring
if (mol.getRingInfo()->numBondRings(bid2) > 1 || excludedBonds[bid2] == 1) {
doneBonds[bid2] = 1;
}
if (!doneBonds[bid2]) {
doneBonds[bid2] = 1;
std::vector<int> atoms(4);
atoms[0] = aid1;
atoms[1] = aid2;
atoms[2] = aid3;
atoms[3] = aid4;
details.expTorsionAtoms.push_back(atoms);
details.expTorsionAngles.emplace_back(param.signs, param.V);
if (verbose) {
std::cout << param.smarts << ": " << aid1 << " " << aid2 << " "
<< aid3 << " " << aid4 << ", (";
for (unsigned int i = 0; i < param.V.size() - 1; ++i) {
std::cout << param.V[i] << ", ";
}
std::cout << param.V[param.V.size() - 1] << ") " << std::endl;
}
} // if not donePaths
} // end loop over matches
} // end loop over patterns
}
// apply basic knowledge such as flat aromatic rings, other sp2-centers,
// straight triple bonds, etc.
if (useBasicKnowledge) {
boost::dynamic_bitset<> doneAtoms(na);
ROMol::ADJ_ITER nbrIdx;
ROMol::ADJ_ITER endNbrs;
// inversion terms (improper torsions / out-of-plane bends / inversion)
// loop over atoms
for (aid2 = 0; aid2 < na; ++aid2) {
if (!(doneAtoms[aid2])) {
std::vector<int> atoms(4, -1);
atoms[1] = aid2;
const Atom *atom2 = mol.getAtomWithIdx(atoms[1]);
int at2AtomicNum = atom2->getAtomicNum();
// if atom is a N,O or C and SP2-hybridized
if (((at2AtomicNum == 6) || (at2AtomicNum == 7) ||
(at2AtomicNum == 8)) &&
(atom2->getHybridization() == Atom::SP2)) {
// get neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom2);
// check if enough neighbours
if (mol.getAtomDegree(atom2) != 3) {
continue;
}
unsigned int i = 0;
unsigned int isBoundToSP2O = 0; // false
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *atomX = mol[*nbrIdx];
atoms[i] = atomX->getIdx();
// if the central atom is sp2 carbon and is bound to sp2 oxygen, set
// a flag
if (!isBoundToSP2O) {
isBoundToSP2O =
((at2AtomicNum == 6) && (atomX->getAtomicNum() == 8) &&
(atomX->getHybridization() == Atom::SP2));
}
if (!i) {
++i;
}
++i;
}
atoms.push_back(at2AtomicNum);
atoms.push_back(isBoundToSP2O);
details.improperAtoms.push_back(atoms);
/*if (verbose) {
std::cout << "out-of-plane bend: " << atoms[0] << " " << atoms[1] <<
" "
<< atoms[2] << " " << atoms[3] << std::endl;
}*/
}
} // if atom is a N,O or C and SP2-hybridized
}
// torsions for flat rings
const RingInfo *rinfo =
mol.getRingInfo(); // FIX: make sure we have ring info
CHECK_INVARIANT(rinfo, "");
const VECT_INT_VECT &atomRings = rinfo->atomRings();
for (const auto &atomRing : atomRings) {
unsigned int rSize = atomRing.size();
// we don't need to deal with 3 membered rings
// and we do not treat rings greater than 6
if (rSize < 4 || rSize > 6) {
continue;
}
// loop over ring atoms
for (unsigned int i = 0; i < rSize; ++i) {
// proper torsions
aid1 = atomRing[i];
aid2 = atomRing[(i + 1) % rSize];
aid3 = atomRing[(i + 2) % rSize];
aid4 = atomRing[(i + 3) % rSize];
bid2 = mol.getBondBetweenAtoms(aid2, aid3)->getIdx();
// if all 4 atoms are SP2, add torsion
if (!(doneBonds[bid2]) &&
(mol.getAtomWithIdx(aid1)->getHybridization() == Atom::SP2) &&
(mol.getAtomWithIdx(aid2)->getHybridization() == Atom::SP2) &&
(mol.getAtomWithIdx(aid3)->getHybridization() == Atom::SP2) &&
(mol.getAtomWithIdx(aid4)->getHybridization() == Atom::SP2)) {
doneBonds[bid2] = 1;
std::vector<int> atoms(4);
atoms[0] = aid1;
atoms[1] = aid2;
atoms[2] = aid3;
atoms[3] = aid4;
details.expTorsionAtoms.push_back(atoms);
std::vector<int> signs(6, 1);
signs[1] = -1; // MMFF sign for m = 2
std::vector<double> fconsts(6, 0.0);
fconsts[1] = 100.0; // 7.0 is MMFF force constants for aromatic rings
details.expTorsionAngles.emplace_back(signs, fconsts);
/*if (verbose) {
std::cout << "SP2 ring: " << aid1 << " " << aid2 << " " << aid3 << "
" << aid4 << std::endl;
}*/
}
} // loop over atoms in ring
} // loop over rings
} // if useBasicKnowledge
} // end function
} // namespace CrystalFF
} // namespace ForceFields
| 1 | 22,552 | Are the coefficients the actual bug fix? | rdkit-rdkit | cpp |
@@ -218,8 +218,8 @@ class PostgresTarget(luigi.Target):
try:
cursor.execute(sql)
- except psycopg2.ProgrammingError as e:
- if e.pgcode == psycopg2.errorcodes.DUPLICATE_TABLE:
+ except (psycopg2.ProgrammingError, psycopg2.IntegrityError) as e:
+ if e.pgcode in [psycopg2.errorcodes.DUPLICATE_TABLE, psycopg2.errorcodes.UNIQUE_VIOLATION]:
pass
else:
raise | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements a subclass of :py:class:`~luigi.target.Target` that writes data to Postgres.
Also provides a helper task to copy data into a Postgres table.
"""
import datetime
import logging
import re
import tempfile
from luigi import six
import luigi
from luigi.contrib import rdbms
logger = logging.getLogger('luigi-interface')
try:
import psycopg2
import psycopg2.errorcodes
import psycopg2.extensions
except ImportError:
logger.warning("Loading postgres module without psycopg2 installed. Will crash at runtime if postgres functionality is used.")
class MultiReplacer(object):
"""
Object for one-pass replace of multiple words
Substituted parts will not be matched against other replace patterns, as opposed to when using multipass replace.
The order of the items in the replace_pairs input will dictate replacement precedence.
Constructor arguments:
replace_pairs -- list of 2-tuples which hold strings to be replaced and replace string
Usage:
.. code-block:: python
>>> replace_pairs = [("a", "b"), ("b", "c")]
>>> MultiReplacer(replace_pairs)("abcd")
'bccd'
>>> replace_pairs = [("ab", "x"), ("a", "x")]
>>> MultiReplacer(replace_pairs)("ab")
'x'
>>> replace_pairs.reverse()
>>> MultiReplacer(replace_pairs)("ab")
'xb'
"""
# TODO: move to misc/util module
def __init__(self, replace_pairs):
"""
Initializes a MultiReplacer instance.
:param replace_pairs: list of 2-tuples which hold strings to be replaced and replace string.
:type replace_pairs: tuple
"""
replace_list = list(replace_pairs) # make a copy in case input is iterable
self._replace_dict = dict(replace_list)
pattern = '|'.join(re.escape(x) for x, y in replace_list)
self._search_re = re.compile(pattern)
def _replacer(self, match_object):
# this method is used as the replace function in the re.sub below
return self._replace_dict[match_object.group()]
def __call__(self, search_string):
# using function replacing for a per-result replace
return self._search_re.sub(self._replacer, search_string)
# these are the escape sequences recognized by postgres COPY
# according to http://www.postgresql.org/docs/8.1/static/sql-copy.html
default_escape = MultiReplacer([('\\', '\\\\'),
('\t', '\\t'),
('\n', '\\n'),
('\r', '\\r'),
('\v', '\\v'),
('\b', '\\b'),
('\f', '\\f')
])
class PostgresTarget(luigi.Target):
"""
Target for a resource in Postgres.
This will rarely have to be directly instantiated by the user.
"""
marker_table = luigi.configuration.get_config().get('postgres', 'marker-table', 'table_updates')
# Use DB side timestamps or client side timestamps in the marker_table
use_db_timestamps = True
def __init__(
self, host, database, user, password, table, update_id, port=5432
):
"""
Args:
host (str): Postgres server address. Possibly a host:port string.
database (str): Database name
user (str): Database user
password (str): Password for specified user
update_id (str): An identifier for this data set
port (int): Postgres server port.
"""
if ':' in host:
self.host, self.port = host.split(':')
else:
self.host = host
self.port = port
self.database = database
self.user = user
self.password = password
self.table = table
self.update_id = update_id
def touch(self, connection=None):
"""
Mark this update as complete.
Important: If the marker table doesn't exist, the connection transaction will be aborted
and the connection reset.
Then the marker table will be created.
"""
self.create_marker_table()
if connection is None:
# TODO: test this
connection = self.connect()
connection.autocommit = True # if connection created here, we commit it here
if self.use_db_timestamps:
connection.cursor().execute(
"""INSERT INTO {marker_table} (update_id, target_table)
VALUES (%s, %s)
""".format(marker_table=self.marker_table),
(self.update_id, self.table))
else:
connection.cursor().execute(
"""INSERT INTO {marker_table} (update_id, target_table, inserted)
VALUES (%s, %s, %s);
""".format(marker_table=self.marker_table),
(self.update_id, self.table,
datetime.datetime.now()))
def exists(self, connection=None):
if connection is None:
connection = self.connect()
connection.autocommit = True
cursor = connection.cursor()
try:
cursor.execute("""SELECT 1 FROM {marker_table}
WHERE update_id = %s
LIMIT 1""".format(marker_table=self.marker_table),
(self.update_id,)
)
row = cursor.fetchone()
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.UNDEFINED_TABLE:
row = None
else:
raise
return row is not None
def connect(self):
"""
Get a psycopg2 connection object to the database where the table is.
"""
connection = psycopg2.connect(
host=self.host,
port=self.port,
database=self.database,
user=self.user,
password=self.password)
connection.set_client_encoding('utf-8')
return connection
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
connection = self.connect()
connection.autocommit = True
cursor = connection.cursor()
if self.use_db_timestamps:
sql = """ CREATE TABLE {marker_table} (
update_id TEXT PRIMARY KEY,
target_table TEXT,
inserted TIMESTAMP DEFAULT NOW())
""".format(marker_table=self.marker_table)
else:
sql = """ CREATE TABLE {marker_table} (
update_id TEXT PRIMARY KEY,
target_table TEXT,
inserted TIMESTAMP);
""".format(marker_table=self.marker_table)
try:
cursor.execute(sql)
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.DUPLICATE_TABLE:
pass
else:
raise
connection.close()
def open(self, mode):
raise NotImplementedError("Cannot open() PostgresTarget")
class CopyToTable(rdbms.CopyToTable):
"""
Template task for inserting a data set into Postgres
Usage:
Subclass and override the required `host`, `database`, `user`,
`password`, `table` and `columns` attributes.
To customize how to access data from an input task, override the `rows` method
with a generator that yields each row as a tuple with fields ordered according to `columns`.
"""
def rows(self):
"""
Return/yield tuples or lists corresponding to each row to be inserted.
"""
with self.input().open('r') as fobj:
for line in fobj:
yield line.strip('\n').split('\t')
def map_column(self, value):
"""
Applied to each column of every row returned by `rows`.
Default behaviour is to escape special characters and identify any self.null_values.
"""
if value in self.null_values:
return r'\\N'
else:
return default_escape(six.text_type(value))
# everything below will rarely have to be overridden
def output(self):
"""
Returns a PostgresTarget representing the inserted dataset.
Normally you don't override this.
"""
return PostgresTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id
)
def copy(self, cursor, file):
if isinstance(self.columns[0], six.string_types):
column_names = self.columns
elif len(self.columns[0]) == 2:
column_names = [c[0] for c in self.columns]
else:
raise Exception('columns must consist of column strings or (column string, type string) tuples (was %r ...)' % (self.columns[0],))
cursor.copy_from(file, self.table, null=r'\\N', sep=self.column_separator, columns=column_names)
def run(self):
"""
Inserts data generated by rows() into target table.
If the target table doesn't exist, self.create_table will be called to attempt to create the table.
Normally you don't want to override this.
"""
if not (self.table and self.columns):
raise Exception("table and columns need to be specified")
connection = self.output().connect()
# transform all data generated by rows() using map_column and write data
# to a temporary file for import using postgres COPY
tmp_dir = luigi.configuration.get_config().get('postgres', 'local-tmp-dir', None)
tmp_file = tempfile.TemporaryFile(dir=tmp_dir)
n = 0
for row in self.rows():
n += 1
if n % 100000 == 0:
logger.info("Wrote %d lines", n)
rowstr = self.column_separator.join(self.map_column(val) for val in row)
rowstr += "\n"
tmp_file.write(rowstr.encode('utf-8'))
logger.info("Done writing, importing at %s", datetime.datetime.now())
tmp_file.seek(0)
# attempt to copy the data into postgres
# if it fails because the target table doesn't exist
# try to create it by running self.create_table
for attempt in range(2):
try:
cursor = connection.cursor()
self.init_copy(connection)
self.copy(cursor, tmp_file)
self.post_copy(connection)
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.UNDEFINED_TABLE and attempt == 0:
# if first attempt fails with "relation not found", try creating table
logger.info("Creating table %s", self.table)
connection.reset()
self.create_table(connection)
else:
raise
else:
break
# mark as complete in same transaction
self.output().touch(connection)
# commit and clean up
connection.commit()
connection.close()
tmp_file.close()
class PostgresQuery(rdbms.Query):
"""
Template task for querying a Postgres compatible database
Usage:
Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes.
Optionally one can override the `autocommit` attribute to put the connection for the query in autocommit mode.
Override the `run` method if your use case requires some action with the query result.
Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once
To customize the query signature as recorded in the database marker table, override the `update_id` property.
"""
def run(self):
connection = self.output().connect()
connection.autocommit = self.autocommit
cursor = connection.cursor()
sql = self.query
logger.info('Executing query from task: {name}'.format(name=self.__class__))
cursor.execute(sql)
# Update marker table
self.output().touch(connection)
# commit and close connection
connection.commit()
connection.close()
def output(self):
"""
Returns a PostgresTarget representing the executed query.
Normally you don't override this.
"""
return PostgresTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id
)
| 1 | 17,923 | Why are the error code options in a list and not a tuple? | spotify-luigi | py |
@@ -135,7 +135,7 @@ class ExternalProjectAccessRuleBookTest(ForsetiTestCase):
TEST_ANCESTORS = [Project('123'),
Folder('456'),
Organization('7890')]
- TEST_BAD_ANC = [Project('123'),
+ TEST_BAD_ANCESTORS = [Project('123'),
Folder('456'),
Organization('ABC')]
| 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the CloudSqlRulesEngine."""
# pylint: disable=line-too-long
import unittest
import mock
from google.cloud.forseti.common.gcp_type.folder import Folder
from google.cloud.forseti.common.gcp_type.organization import Organization
from google.cloud.forseti.common.gcp_type.project import Project
from google.cloud.forseti.scanner.audit import external_project_access_rules_engine as engine_module
from google.cloud.forseti.scanner.audit import errors as audit_errors
from tests.unittest_utils import ForsetiTestCase
from tests.unittest_utils import get_datafile_path
# pylint: enable=line-too-long
class ExternalProjectAccessRulesEngineTest(ForsetiTestCase):
"""Tests for the ExternalProjectAccessRulesEngine."""
TEST_ANCESTRIES = {
'[email protected]': [Project('13579'),
Folder('24680'),
Organization('1234567')],
'[email protected]': [Project('13579'),
Folder('0987654321'),
Organization('1234567')]}
TEST_ANCESTRIES_SIMPLE = {
'[email protected]': [Project('13579'),
Organization('567890')]}
TEST_ANCESTRIES_VIOLATIONS = {
'[email protected]': [Project('13579'),
Folder('24680'),
Organization('1357924680')]}
def setUp(self):
self.rules_engine = engine_module
self.rules_engine.LOGGER = mock.MagicMock()
self.inventory_config = mock.MagicMock()
self.inventory_config.get_root_resource_id = (
mock.MagicMock(return_value='organizations/567890'))
def test_no_rule_added(self):
"""Test that a RuleBook is built correctly with an empty yaml file."""
rules_local_path = get_datafile_path(
__file__, 'external_project_access_test_rules_0.yaml')
rules_engine = engine_module.ExternalProjectAccessRulesEngine(
rules_file_path=rules_local_path)
rules_engine.build_rule_book(self.inventory_config)
self.assertEqual(0, len(rules_engine.rule_book.resource_rules_map))
def test_good_yaml_file(self):
"""Test that a RuleBook is built correctly with a yaml file."""
rules_local_path = get_datafile_path(
__file__, 'external_project_access_test_rules_1.yaml')
rules_engine = engine_module.ExternalProjectAccessRulesEngine(
rules_file_path=rules_local_path)
rules_engine.build_rule_book(self.inventory_config)
self.assertEqual(2, len(rules_engine.rule_book.resource_rules_map))
def test_yaml_file_bad_ancestor(self):
"""Test that a RuleBook is built correctly with a yaml file."""
rules_local_path = get_datafile_path(
__file__, 'external_project_access_test_rules_2.yaml')
rules_engine = engine_module.ExternalProjectAccessRulesEngine(
rules_file_path=rules_local_path)
with self.assertRaises(audit_errors.InvalidRulesSchemaError):
rules_engine.build_rule_book(self.inventory_config)
def test_no_violations(self):
"""Test that no violations are found"""
all_violations = []
rules_local_path = get_datafile_path(
__file__, 'external_project_access_test_rules_1.yaml')
rules_engine = engine_module.ExternalProjectAccessRulesEngine(
rules_file_path=rules_local_path)
rules_engine.build_rule_book(self.inventory_config)
for user, ancestry in self.TEST_ANCESTRIES.iteritems():
violations = rules_engine.find_policy_violations(
user, ancestry, True)
all_violations.extend(violations)
self.assertEqual(len(all_violations), 0)
def test_no_violations_no_rules(self):
"""Test that no violations are found when no rules in the file."""
all_violations = []
rules_local_path = get_datafile_path(
__file__, 'external_project_access_test_rules_0.yaml')
rules_engine = engine_module.ExternalProjectAccessRulesEngine(
rules_file_path=rules_local_path)
rules_engine.build_rule_book(self.inventory_config)
for user, ancestry in self.TEST_ANCESTRIES_SIMPLE.iteritems():
violations = rules_engine.find_policy_violations(
user, ancestry, True)
all_violations.extend(violations)
self.assertEqual(len(all_violations), 0)
def test_violations_are_found(self):
"""Test that violations are found"""
all_violations = []
rules_local_path = get_datafile_path(
__file__, 'external_project_access_test_rules_1.yaml')
rules_engine = engine_module.ExternalProjectAccessRulesEngine(
rules_file_path=rules_local_path)
rules_engine.build_rule_book(self.inventory_config)
for user, ancestry in self.TEST_ANCESTRIES_VIOLATIONS.iteritems():
violations = rules_engine.find_policy_violations(user,
ancestry,
True)
all_violations.extend(violations)
self.assertEqual(len(all_violations), 2)
class ExternalProjectAccessRuleBookTest(ForsetiTestCase):
"""Tests for the ExternalProjectAccessRuleBook."""
TEST_GOOD_RULE = dict(name='default',
allowed_ancestors=['organizations/7890'])
TEST_BAD_RULE = dict(name='default',
allowed_ancestors=['policy/12345'])
TEST_RULE_DEFS = dict(rules=[TEST_GOOD_RULE])
TEST_ANCESTORS = [Project('123'),
Folder('456'),
Organization('7890')]
TEST_BAD_ANC = [Project('123'),
Folder('456'),
Organization('ABC')]
def setUp(self):
"""Set up."""
self.rule_index = 0
self.rules_engine = engine_module
self.rules_engine.LOGGER = mock.MagicMock()
self.inventory_config = mock.MagicMock()
self.inventory_config.get_root_resource_id = (
mock.MagicMock(return_value='organizations/7890'))
self.rule_book = (
engine_module.ExternalProjectAccessRuleBook(self.inventory_config))
def test_validate_good_ancestor(self):
"""Test proper rule validation"""
self.rule_book.validate_ancestors(
self.TEST_GOOD_RULE['allowed_ancestors'], 0)
def test_validate_bad_ancestor(self):
"""Test proper rule validation against bad ancestor"""
with self.assertRaises(audit_errors.InvalidRulesSchemaError):
self.rule_book.validate_ancestors(
self.TEST_BAD_RULE['allowed_ancestors'], 0)
def test_missing_ancestors(self):
"""Test proper rule validation against missing ancestors"""
with self.assertRaises(audit_errors.InvalidRulesSchemaError):
self.rule_book.validate_ancestor(None, 0)
def test_process_good_rule(self):
"""Test proper rule processing"""
resources = self.rule_book.process_rule(self.TEST_GOOD_RULE, 0)
self.assertEqual(resources[0].id, '7890')
self.assertTrue(isinstance(resources[0], Organization))
def test_process_bad_rule(self):
"""Test proper rule validation with exception"""
with self.assertRaises(audit_errors.InvalidRulesSchemaError):
self.rule_book.process_rule(self.TEST_BAD_RULE, 0)
def test_add_rule(self):
"""Test proper rule addition"""
self.rule_book.add_rule(self.TEST_GOOD_RULE, 0)
self.assertEqual(1, len(self.rule_book.resource_rules_map))
def test_add_rules(self):
"""Test proper addtion of multiple rules"""
self.rule_book.add_rules(self.TEST_RULE_DEFS)
self.assertEqual(1, len(self.rule_book.resource_rules_map))
def test_no_violations(self):
"""Test no violations are found"""
violations = self.rule_book.find_policy_violations('[email protected]',
self.TEST_ANCESTORS)
self.assertEqual(0, len(violations))
def test_violations(self):
"""Test violations are found"""
violations = self.rule_book.find_policy_violations(
'[email protected]', self.TEST_BAD_ANC)
self.assertEqual(0, len(violations))
class ExternalProjectAccessRuleTest(ForsetiTestCase):
"""Tests for the ExternalProjectAccessRuleBook."""
TEST_ANCESTORS = [Project('123'),
Folder('456'),
Organization('7890')]
def test_single_item_in_rule_match(self):
"""Test no violations are found with single item in rule"""
rule = engine_module.Rule(rule_name='test_single_item_in_rule_match',
rule_index=0,
rules=[Organization('7890')])
violation = rule.find_violation('[email protected]',
self.TEST_ANCESTORS)
self.assertIsNone(violation)
def test_multi_items_in_rule_match(self):
"""Test no violations are found with multiple items in rule"""
rule = engine_module.Rule(rule_name='test_multi_items_in_rule_match',
rule_index=0,
rules=[Folder('456'), Organization('7890')])
violation = rule.find_violation('[email protected]',
self.TEST_ANCESTORS)
self.assertIsNone(violation)
def test_single_item_no_match(self):
"""Test violations are found with single item in rule"""
rule = engine_module.Rule(rule_name='test_single_item_no_match',
rule_index=0,
rules=[Organization('789')])
violation = rule.find_violation('[email protected]',
self.TEST_ANCESTORS)
self.assertEqual(0, violation.rule_index)
self.assertEqual('test_single_item_no_match',
violation.rule_name)
self.assertEqual('projects/123', violation.full_name)
self.assertEqual('projects/123,folders/456,organizations/7890',
violation.resource_data)
def test_multi_items_no_match(self):
"""Test violations are found with multiple items in rule"""
rule = engine_module.Rule(rule_name='test_multi_items_no_match',
rule_index=0,
rules=[Folder('45'), Organization('789')])
violation = rule.find_violation('[email protected]',
self.TEST_ANCESTORS)
self.assertEqual(0, violation.rule_index)
self.assertEqual('test_multi_items_no_match',
violation.rule_name)
self.assertEqual('projects/123', violation.full_name)
self.assertEqual('projects/123,folders/456,organizations/7890',
violation.resource_data)
if __name__ == '__main__':
unittest.main()
| 1 | 32,478 | Need alignment with the preceding line. | forseti-security-forseti-security | py |
@@ -28,10 +28,8 @@ public class MainnetProtocolSchedule {
public static final BigInteger DEFAULT_CHAIN_ID = BigInteger.ONE;
- public static ProtocolSchedule create() {
- return fromConfig(
- GenesisConfigFile.mainnet().getConfigOptions(), PrivacyParameters.DEFAULT, false);
- }
+ public static final ProtocolSchedule DEFAULT =
+ fromConfig(GenesisConfigFile.getMainnetConfigOptions(), PrivacyParameters.DEFAULT, false);
/**
* Create a Mainnet protocol schedule from a config object | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.mainnet;
import org.hyperledger.besu.config.GenesisConfigFile;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.difficulty.fixed.FixedDifficultyCalculators;
import org.hyperledger.besu.ethereum.difficulty.fixed.FixedDifficultyProtocolSchedule;
import java.math.BigInteger;
import java.util.function.Function;
/** Provides {@link ProtocolSpec} lookups for mainnet hard forks. */
public class MainnetProtocolSchedule {
public static final BigInteger DEFAULT_CHAIN_ID = BigInteger.ONE;
public static ProtocolSchedule create() {
return fromConfig(
GenesisConfigFile.mainnet().getConfigOptions(), PrivacyParameters.DEFAULT, false);
}
/**
* Create a Mainnet protocol schedule from a config object
*
* @param config {@link GenesisConfigOptions} containing the config options for the milestone
* starting points
* @param privacyParameters the parameters set for private transactions
* @param isRevertReasonEnabled whether storing the revert reason is for failed transactions
* @return A configured mainnet protocol schedule
*/
public static ProtocolSchedule fromConfig(
final GenesisConfigOptions config,
final PrivacyParameters privacyParameters,
final boolean isRevertReasonEnabled) {
if (FixedDifficultyCalculators.isFixedDifficultyInConfig(config)) {
return FixedDifficultyProtocolSchedule.create(
config, privacyParameters, isRevertReasonEnabled);
}
return new ProtocolScheduleBuilder(
config,
DEFAULT_CHAIN_ID,
Function.identity(),
privacyParameters,
isRevertReasonEnabled,
config.isQuorum())
.createProtocolSchedule();
}
/**
* Create a Mainnet protocol schedule from a config object
*
* @param config {@link GenesisConfigOptions} containing the config options for the milestone
* starting points
* @param isRevertReasonEnabled whether storing the revert reason is for failed transactions
* @return A configured mainnet protocol schedule
*/
public static ProtocolSchedule fromConfig(
final GenesisConfigOptions config, final boolean isRevertReasonEnabled) {
return fromConfig(config, PrivacyParameters.DEFAULT, isRevertReasonEnabled);
}
/**
* Create a Mainnet protocol schedule from a config object
*
* @param config {@link GenesisConfigOptions} containing the config options for the milestone
* starting points
* @return A configured mainnet protocol schedule
*/
public static ProtocolSchedule fromConfig(final GenesisConfigOptions config) {
return fromConfig(config, PrivacyParameters.DEFAULT, false);
}
}
| 1 | 24,068 | Changing from a static method to a constant is inessential to Type Transactions and has a long reach, adding many unneeded files and diff lines. | hyperledger-besu | java |
@@ -92,13 +92,14 @@ public final class TreeSet<T> implements SortedSet<T>, Serializable {
public static <T extends Comparable<? super T>> TreeSet<T> ofAll(java.lang.Iterable<? extends T> values) {
Objects.requireNonNull(values, "values is null");
- return new TreeSet<>(RedBlackTree.ofAll(values));
+ return values.iterator().hasNext() ? new TreeSet<>(RedBlackTree.ofAll(values)) : empty();
}
+ @SuppressWarnings("unchecked")
public static <T> TreeSet<T> ofAll(Comparator<? super T> comparator, java.lang.Iterable<? extends T> values) {
Objects.requireNonNull(comparator, "comparator is null");
Objects.requireNonNull(values, "values is null");
- return new TreeSet<>(RedBlackTree.ofAll(comparator, values));
+ return values.iterator().hasNext() ? new TreeSet<>(RedBlackTree.ofAll(comparator, values)) : (TreeSet<T>) empty();
}
/** | 1 | /* / \____ _ ______ _____ / \____ ____ _____
* / \__ \/ \ / \__ \ / __// \__ \ / \/ __ \ Javaslang
* _/ // _\ \ \/ / _\ \\_ \/ // _\ \ /\ \__/ / Copyright 2014-2015 Daniel Dietrich
* /___/ \_____/\____/\_____/____/\___\_____/_/ \_/____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.Lazy;
import javaslang.Tuple2;
import javaslang.control.None;
import javaslang.control.Option;
import javaslang.control.Some;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.function.*;
import java.util.stream.Collector;
import static javaslang.collection.Comparators.naturalComparator;
/**
* SortedSet implementation, backed by a Red/Black Tree.
*
* @param <T> Component type
* @author Daniel Dietrich
* @since 2.0.0
*/
// DEV-NOTE: it is not possible to create an EMPTY TreeSet without a Comparator type in scope
public final class TreeSet<T> implements SortedSet<T>, Serializable {
private static final long serialVersionUID = 1L;
private final RedBlackTree<T> tree;
TreeSet(RedBlackTree<T> tree) {
this.tree = tree;
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link javaslang.collection.TreeSet}.
*
* @param <T> Component type of the List.
* @return A javaslang.collection.List Collector.
*/
public static <T> Collector<T, ArrayList<T>, TreeSet<T>> collector() {
final Supplier<ArrayList<T>> supplier = ArrayList::new;
final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<T>, TreeSet<T>> finisher = list -> TreeSet.ofAll(naturalComparator(), list);
return Collector.of(supplier, accumulator, combiner, finisher);
}
public static <T extends Comparable<? super T>> TreeSet<T> empty() {
return new TreeSet<>(RedBlackTree.<T> empty());
}
public static <T> TreeSet<T> empty(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return new TreeSet<>(RedBlackTree.empty(comparator));
}
public static <T extends Comparable<? super T>> TreeSet<T> of(T value) {
return new TreeSet<>(RedBlackTree.of(value));
}
public static <T> TreeSet<T> of(Comparator<? super T> comparator, T value) {
Objects.requireNonNull(comparator, "comparator is null");
return new TreeSet<>(RedBlackTree.of(comparator, value));
}
@SuppressWarnings({ "unchecked", "varargs" })
@SafeVarargs
public static <T extends Comparable<? super T>> TreeSet<T> of(T... values) {
Objects.requireNonNull(values, "values is null");
return new TreeSet<>(RedBlackTree.of(values));
}
@SuppressWarnings({ "unchecked", "varargs" })
@SafeVarargs
public static <T> TreeSet<T> of(Comparator<? super T> comparator, T... values) {
Objects.requireNonNull(comparator, "comparator is null");
Objects.requireNonNull(values, "values is null");
return new TreeSet<>(RedBlackTree.of(comparator, values));
}
public static <T extends Comparable<? super T>> TreeSet<T> ofAll(java.lang.Iterable<? extends T> values) {
Objects.requireNonNull(values, "values is null");
return new TreeSet<>(RedBlackTree.ofAll(values));
}
public static <T> TreeSet<T> ofAll(Comparator<? super T> comparator, java.lang.Iterable<? extends T> values) {
Objects.requireNonNull(comparator, "comparator is null");
Objects.requireNonNull(values, "values is null");
return new TreeSet<>(RedBlackTree.ofAll(comparator, values));
}
/**
* Creates a TreeSet based on the elements of a boolean array.
*
* @param array a boolean array
* @return A new TreeSet of Boolean values
*/
public static TreeSet<Boolean> ofAll(boolean[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a byte array.
*
* @param array a byte array
* @return A new TreeSet of Byte values
*/
public static TreeSet<Byte> ofAll(byte[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a char array.
*
* @param array a char array
* @return A new TreeSet of Character values
*/
public static TreeSet<Character> ofAll(char[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a double array.
*
* @param array a double array
* @return A new TreeSet of Double values
*/
public static TreeSet<Double> ofAll(double[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a float array.
*
* @param array a float array
* @return A new TreeSet of Float values
*/
public static TreeSet<Float> ofAll(float[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of an int array.
*
* @param array an int array
* @return A new TreeSet of Integer values
*/
public static TreeSet<Integer> ofAll(int[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a long array.
*
* @param array a long array
* @return A new TreeSet of Long values
*/
public static TreeSet<Long> ofAll(long[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet based on the elements of a short array.
*
* @param array a short array
* @return A new TreeSet of Short values
*/
public static TreeSet<Short> ofAll(short[] array) {
Objects.requireNonNull(array, "array is null");
return TreeSet.ofAll(Iterator.ofAll(array));
}
/**
* Creates a TreeSet of int numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.range(0, 0) // = TreeSet()
* TreeSet.range(2, 0) // = TreeSet()
* TreeSet.range(-2, 2) // = TreeSet(-2, -1, 0, 1)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of int values as specified or the empty range if {@code from >= toExclusive}
*/
public static TreeSet<Integer> range(int from, int toExclusive) {
return TreeSet.rangeBy(from, toExclusive, 1);
}
/**
* Creates a TreeSet of int numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeBy(1, 3, 1) // = TreeSet(1, 2)
* TreeSet.rangeBy(1, 4, 2) // = TreeSet(1, 3)
* TreeSet.rangeBy(4, 1, -2) // = TreeSet(4, 2)
* TreeSet.rangeBy(4, 1, 2) // = TreeSet()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of long values as specified or the empty range if<br>
* {@code from >= toInclusive} and {@code step > 0} or<br>
* {@code from <= toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static TreeSet<Integer> rangeBy(int from, int toExclusive, int step) {
if (step == 0) {
throw new IllegalArgumentException("step cannot be 0");
} else if (from == toExclusive || step * (from - toExclusive) > 0) {
return TreeSet.empty();
} else {
final int one = (from < toExclusive) ? 1 : -1;
return TreeSet.rangeClosedBy(from, toExclusive - one, step);
}
}
/**
* Creates a TreeSet of long numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.range(0L, 0L) // = TreeSet()
* TreeSet.range(2L, 0L) // = TreeSet()
* TreeSet.range(-2L, 2L) // = TreeSet(-2L, -1L, 0L, 1L)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of long values as specified or the empty range if {@code from >= toExclusive}
*/
public static TreeSet<Long> range(long from, long toExclusive) {
return TreeSet.rangeBy(from, toExclusive, 1);
}
/**
* Creates a TreeSet of long numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeBy(1L, 3L, 1L) // = TreeSet(1L, 2L)
* TreeSet.rangeBy(1L, 4L, 2L) // = TreeSet(1L, 3L)
* TreeSet.rangeBy(4L, 1L, -2L) // = TreeSet(4L, 2L)
* TreeSet.rangeBy(4L, 1L, 2L) // = TreeSet()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of long values as specified or the empty range if<br>
* {@code from >= toInclusive} and {@code step > 0} or<br>
* {@code from <= toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static TreeSet<Long> rangeBy(long from, long toExclusive, long step) {
if (step == 0) {
throw new IllegalArgumentException("step cannot be 0");
} else if (from == toExclusive || step * (from - toExclusive) > 0) {
return TreeSet.empty();
} else {
final int one = (from < toExclusive) ? 1 : -1;
return TreeSet.rangeClosedBy(from, toExclusive - one, step);
}
}
/**
* Creates a TreeSet of int numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeClosed(0, 0) // = TreeSet(0)
* TreeSet.rangeClosed(2, 0) // = TreeSet()
* TreeSet.rangeClosed(-2, 2) // = TreeSet(-2, -1, 0, 1, 2)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of int values as specified or the empty range if {@code from > toInclusive}
*/
public static TreeSet<Integer> rangeClosed(int from, int toInclusive) {
return TreeSet.rangeClosedBy(from, toInclusive, 1);
}
/**
* Creates a TreeSet of int numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeClosedBy(1, 3, 1) // = TreeSet(1, 2, 3)
* TreeSet.rangeClosedBy(1, 4, 2) // = TreeSet(1, 3)
* TreeSet.rangeClosedBy(4, 1, -2) // = TreeSet(4, 2)
* TreeSet.rangeClosedBy(4, 1, 2) // = TreeSet()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or the empty range if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static TreeSet<Integer> rangeClosedBy(int from, int toInclusive, int step) {
if (step == 0) {
throw new IllegalArgumentException("step cannot be 0");
} else if (from == toInclusive) {
return TreeSet.of(from);
} else if (step * (from - toInclusive) > 0) {
return TreeSet.empty();
} else {
final int gap = (from - toInclusive) % step;
final int signum = (from < toInclusive) ? -1 : 1;
final int bound = from * signum;
TreeSet<Integer> result = TreeSet.empty();
for (int i = toInclusive + gap; i * signum <= bound; i -= step) {
result = result.add(i);
}
return result;
}
}
/**
* Creates a TreeSet of long numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeClosed(0L, 0L) // = TreeSet(0L)
* TreeSet.rangeClosed(2L, 0L) // = TreeSet()
* TreeSet.rangeClosed(-2L, 2L) // = TreeSet(-2L, -1L, 0L, 1L, 2L)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of long values as specified or the empty range if {@code from > toInclusive}
*/
public static TreeSet<Long> rangeClosed(long from, long toInclusive) {
return TreeSet.rangeClosedBy(from, toInclusive, 1L);
}
/**
* Creates a TreeSet of long numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* TreeSet.rangeClosedBy(1L, 3L, 1L) // = TreeSet(1L, 2L, 3L)
* TreeSet.rangeClosedBy(1L, 4L, 2L) // = TreeSet(1L, 3L)
* TreeSet.rangeClosedBy(4L, 1L, -2L) // = TreeSet(4L, 2L)
* TreeSet.rangeClosedBy(4L, 1L, 2L) // = TreeSet()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or the empty range if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static TreeSet<Long> rangeClosedBy(long from, long toInclusive, long step) {
if (step == 0) {
throw new IllegalArgumentException("step cannot be 0");
} else if (from == toInclusive) {
return TreeSet.of(from);
} else if (step * (from - toInclusive) > 0) {
return TreeSet.empty();
} else {
final long gap = (from - toInclusive) % step;
final int signum = (from < toInclusive) ? -1 : 1;
final long bound = from * signum;
TreeSet<Long> result = TreeSet.empty();
for (long i = toInclusive + gap; i * signum <= bound; i -= step) {
result = result.add(i);
}
return result;
}
}
@Override
public TreeSet<T> add(T element) {
return new TreeSet<>(tree.insert(element));
}
@SuppressWarnings("unchecked")
@Override
public TreeSet<T> addAll(java.lang.Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty() && elements instanceof TreeSet) {
return (TreeSet<T>) elements;
} else {
RedBlackTree<T> that = tree;
for (T element : elements) {
that = that.insert(element);
}
if (tree == that) {
return this;
} else {
return new TreeSet<>(that);
}
}
}
@Override
public TreeSet<T> clear() {
return isEmpty() ? this : new TreeSet<>(tree.clear());
}
@Override
public Comparator<? super T> comparator() {
return tree.comparator();
}
@SuppressWarnings("unchecked")
@Override
public TreeSet<T> diff(Set<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof TreeSet) {
final RedBlackTree<T> that = ((TreeSet<T>) elements).tree;
return new TreeSet<>(tree.difference(that));
} else {
return removeAll(elements);
}
}
@Override
public boolean contains(T element) {
return tree.contains(element);
}
@Override
public TreeSet<T> distinct() {
return this;
}
@Override
public TreeSet<T> distinctBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return TreeSet.ofAll(tree.comparator(), iterator().distinctBy(comparator));
}
@Override
public <U> TreeSet<T> distinctBy(Function<? super T, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
return TreeSet.ofAll(tree.comparator(), iterator().distinctBy(keyExtractor));
}
@Override
public TreeSet<T> drop(int n) {
if (n <= 0) {
return this;
} else {
return TreeSet.ofAll(tree.comparator(), iterator().drop(n));
}
}
@Override
public TreeSet<T> dropRight(int n) {
if (n <= 0) {
return this;
} else {
return TreeSet.ofAll(tree.comparator(), iterator().dropRight(n));
}
}
@Override
public TreeSet<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return TreeSet.ofAll(tree.comparator(), iterator().dropWhile(predicate));
}
@Override
public TreeSet<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return TreeSet.ofAll(tree.comparator(), iterator().filter(predicate));
}
@Override
public Option<T> findLast(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return iterator().findLast(predicate);
}
@Override
public <U> TreeSet<U> flatMap(Function<? super T, ? extends java.lang.Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return TreeSet.ofAll(naturalComparator(), iterator().flatMap(mapper));
}
@Override
public TreeSet<Object> flatten() {
return TreeSet.ofAll(naturalComparator(), iterator().flatten());
}
@Override
public <U> U foldRight(U zero, BiFunction<? super T, ? super U, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return iterator().foldRight(zero, f);
}
@Override
public <C> Map<C, TreeSet<T>> groupBy(Function<? super T, ? extends C> classifier) {
Objects.requireNonNull(classifier, "classifier is null");
return iterator()
.groupBy(classifier)
.map((key, iterator) -> new Map.Entry<>(key, TreeSet.ofAll(tree.comparator(), iterator)));
}
@Override
public boolean hasDefiniteSize() {
return true;
}
@Override
public T head() {
if (isEmpty()) {
throw new NoSuchElementException("head of empty TreeSet");
} else {
return tree.min().get();
}
}
@Override
public Option<T> headOption() {
return tree.min();
}
@Override
public TreeSet<T> init() {
if (isEmpty()) {
throw new UnsupportedOperationException("init of empty TreeSet");
} else {
return new TreeSet<>(tree.delete(tree.max().get()));
}
}
@Override
public Option<TreeSet<T>> initOption() {
return isEmpty() ? None.instance() : new Some<>(init());
}
@SuppressWarnings("unchecked")
@Override
public TreeSet<T> intersect(Set<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof TreeSet) {
final RedBlackTree<T> that = ((TreeSet<T>) elements).tree;
return new TreeSet<>(tree.intersection(that));
} else {
return retainAll(elements);
}
}
@Override
public boolean isEmpty() {
return tree.isEmpty();
}
@Override
public boolean isTraversableAgain() {
return true;
}
@Override
public Iterator<T> iterator() {
return tree.iterator();
}
@Override
public int length() {
return tree.size();
}
@Override
public <U> TreeSet<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return TreeSet.ofAll(naturalComparator(), iterator().map(mapper));
}
@Override
public Tuple2<TreeSet<T>, TreeSet<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return iterator()
.partition(predicate)
.map(i1 -> TreeSet.ofAll(tree.comparator(), i1), i2 -> TreeSet.ofAll(tree.comparator(), i2));
}
@Override
public TreeSet<T> peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (!isEmpty()) {
action.accept(head());
}
return this;
}
@Override
public T reduceRight(BiFunction<? super T, ? super T, ? extends T> op) {
Objects.requireNonNull(op, "op is null");
return iterator().reduceRight(op);
}
@Override
public TreeSet<T> remove(T element) {
return new TreeSet<>(tree.delete(element));
}
@Override
public TreeSet<T> removeAll(java.lang.Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty()) {
return this;
} else {
RedBlackTree<T> that = tree;
final java.util.Iterator<? extends T> iter = elements.iterator();
while (!that.isEmpty() && iter.hasNext()) {
that = that.delete(iter.next());
}
if (that == tree) {
return this;
} else {
return new TreeSet<>(that);
}
}
}
@Override
public TreeSet<T> replace(T currentElement, T newElement) {
if (tree.contains(currentElement)) {
return new TreeSet<>(tree.delete(currentElement).insert(newElement));
} else {
return this;
}
}
@Override
public TreeSet<T> replaceAll(T currentElement, T newElement) {
// a set has only one occurrence
return replace(currentElement, newElement);
}
@Override
public TreeSet<T> replaceAll(UnaryOperator<T> operator) {
Objects.requireNonNull(operator, "operator is null");
return TreeSet.ofAll(tree.comparator(), iterator().replaceAll(operator));
}
@Override
public TreeSet<T> retainAll(java.lang.Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty()) {
return this;
} else {
final RedBlackTree<T> kept = RedBlackTree.ofAll(tree.comparator(), elements);
return new TreeSet<>(tree.intersection(kept));
}
}
@Override
public Tuple2<TreeSet<T>, TreeSet<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return iterator()
.span(predicate)
.map(i1 -> TreeSet.ofAll(tree.comparator(), i1), i2 -> TreeSet.ofAll(tree.comparator(), i2));
}
@Override
public TreeSet<T> tail() {
if (isEmpty()) {
throw new UnsupportedOperationException("tail of empty TreeSet");
} else {
return new TreeSet<>(tree.delete(tree.min().get()));
}
}
@Override
public Option<TreeSet<T>> tailOption() {
return isEmpty() ? None.instance() : new Some<>(tail());
}
@Override
public TreeSet<T> take(int n) {
return TreeSet.ofAll(tree.comparator(), iterator().take(n));
}
@Override
public TreeSet<T> takeRight(int n) {
return TreeSet.ofAll(tree.comparator(), iterator().takeRight(n));
}
@Override
public TreeSet<T> takeUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return takeWhile(predicate.negate());
}
@Override
public TreeSet<T> takeWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return TreeSet.ofAll(tree.comparator(), iterator().takeWhile(predicate));
}
@SuppressWarnings("unchecked")
@Override
public TreeSet<T> union(Set<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof TreeSet) {
final RedBlackTree<T> that = ((TreeSet<T>) elements).tree;
return new TreeSet<>(tree.union(that));
} else {
return addAll(elements);
}
}
@Override
public <T1, T2> Tuple2<TreeSet<T1>, TreeSet<T2>> unzip(Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return iterator()
.unzip(unzipper)
.map(i1 -> TreeSet.ofAll(naturalComparator(), i1), i2 -> TreeSet.ofAll(naturalComparator(), i2));
}
@Override
public <U> TreeSet<Tuple2<T, U>> zip(java.lang.Iterable<U> that) {
Objects.requireNonNull(that, "that is null");
final Comparator<Tuple2<T, U>> tuple2Comparator = Tuple2.comparator(tree.comparator(), naturalComparator());
return TreeSet.ofAll(tuple2Comparator, iterator().zip(that));
}
@Override
public <U> TreeSet<Tuple2<T, U>> zipAll(java.lang.Iterable<U> that, T thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
final Comparator<Tuple2<T, U>> tuple2Comparator = Tuple2.comparator(tree.comparator(), naturalComparator());
return TreeSet.ofAll(tuple2Comparator, iterator().zipAll(that, thisElem, thatElem));
}
@Override
public TreeSet<Tuple2<T, Integer>> zipWithIndex() {
final Comparator<? super T> component1Comparator = tree.comparator();
final Comparator<Tuple2<T, Integer>> tuple2Comparator = (t1, t2) -> component1Comparator.compare(t1._1, t2._1);
return TreeSet.ofAll(tuple2Comparator, iterator().zipWithIndex());
}
// -- Object
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof TreeSet) {
final TreeSet<?> that = (TreeSet<?>) o;
return tree.equals(that.tree);
} else {
return false;
}
}
@Override
public int hashCode() {
return tree.hashCode();
}
@Override
public String toString() {
return "TreeSet" + tree.toString();
}
}
| 1 | 6,484 | There is one caveat: javaslang.collection.Iterator is Iterable. With Iterator it does not work. Let's revert this line. Does this happen elsewhere, too? I will double-check the code... | vavr-io-vavr | java |
@@ -659,7 +659,7 @@ class SimpleConfig(Logger):
except:
pass
- def format_amount(self, x, is_diff=False, whitespaces=False):
+ def format_amount(self, x, is_diff = True, whitespaces = False, add_thousands_sep = True):
return format_satoshis(
x,
num_zeros=self.num_zeros, | 1 | import json
import threading
import time
import os
import stat
import ssl
from decimal import Decimal
from typing import Union, Optional, Dict, Sequence, Tuple
from numbers import Real
from copy import deepcopy
from aiorpcx import NetAddress
from . import util
from . import constants
from .util import base_units, base_unit_name_to_decimal_point, decimal_point_to_base_unit_name, UnknownBaseUnit, DECIMAL_POINT_DEFAULT
from .util import format_satoshis, format_fee_satoshis
from .util import user_dir, make_dir, NoDynamicFeeEstimates, quantize_feerate
from .i18n import _
from .logging import get_logger, Logger
FEE_ETA_TARGETS = [25, 10, 5, 2]
FEE_DEPTH_TARGETS = [10000000, 5000000, 2000000, 1000000, 500000, 200000, 100000]
FEE_LN_ETA_TARGET = 2 # note: make sure the network is asking for estimates for this target
# satoshi per kbyte
FEERATE_MAX_DYNAMIC = 1500000
FEERATE_WARNING_HIGH_FEE = 600000
FEERATE_FALLBACK_STATIC_FEE = 150000
FEERATE_DEFAULT_RELAY = 1000
FEERATE_MAX_RELAY = 50000
FEERATE_STATIC_VALUES = [1000, 2000, 5000, 10000, 20000, 30000,
50000, 70000, 100000, 150000, 200000, 300000]
FEERATE_REGTEST_HARDCODED = 180000 # for eclair compat
FEE_RATIO_HIGH_WARNING = 0.05 # warn user if fee/amount for on-chain tx is higher than this
_logger = get_logger(__name__)
FINAL_CONFIG_VERSION = 3
class SimpleConfig(Logger):
"""
The SimpleConfig class is responsible for handling operations involving
configuration files.
There are two different sources of possible configuration values:
1. Command line options.
2. User configuration (in the user's config directory)
They are taken in order (1. overrides config options set in 2.)
"""
    def __init__(self, options=None, read_user_config_function=None,
                 read_user_dir_function=None):
        """Build the config from CLI options and the on-disk user config.

        Args:
            options: dict of command-line options; these take precedence over
                user-config values and cannot be modified at runtime.
            read_user_config_function: injectable reader for the user config
                file (testing seam); defaults to module-level read_user_config.
            read_user_dir_function: injectable provider of the user data
                directory (testing seam); defaults to util.user_dir.
        """
        if options is None:
            options = {}
        Logger.__init__(self)
        # This lock needs to be acquired for updating and reading the config in
        # a thread-safe way.
        self.lock = threading.RLock()
        # fee-estimation state; starts empty and is filled in later
        self.mempool_fees = None  # type: Optional[Sequence[Tuple[Union[float, int], int]]]
        self.fee_estimates = {}  # type: Dict[int, int]
        self.last_time_fee_estimates_requested = 0  # zero ensures immediate fees
        # The following two functions are there for dependency injection when
        # testing.
        if read_user_config_function is None:
            read_user_config_function = read_user_config
        if read_user_dir_function is None:
            self.user_dir = user_dir
        else:
            self.user_dir = read_user_dir_function
        # The command line options (deep-copied so callers cannot mutate them)
        self.cmdline_options = deepcopy(options)
        # don't allow to be set on CLI:
        self.cmdline_options.pop('config_version', None)
        # Set self.path and read the user config.
        # self.user_config must exist (even empty) before electrum_path() runs,
        # because electrum_path() goes through self.get which reads it.
        self.user_config = {}  # for self.get in electrum_path()
        self.path = self.electrum_path()
        self.user_config = read_user_config_function(self.path)
        if not self.user_config:
            # avoid new config getting upgraded
            self.user_config = {'config_version': FINAL_CONFIG_VERSION}
        self._not_modifiable_keys = set()
        # config "upgrade" - CLI options
        self.rename_config_keys(
            self.cmdline_options, {'auto_cycle': 'auto_connect'}, True)
        # config upgrade - user config
        if self.requires_upgrade():
            self.upgrade()
        self._check_dependent_keys()
        # units and formatting
        self.decimal_point = self.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            decimal_point_to_base_unit_name(self.decimal_point)
        except UnknownBaseUnit:
            # stored value does not map to a known base unit; fall back
            self.decimal_point = DECIMAL_POINT_DEFAULT
        self.num_zeros = int(self.get('num_zeros', 0))
        self.amt_precision_post_satoshi = int(self.get('amt_precision_post_satoshi', 0))
def electrum_path(self):
# Read electrum_path from command line
# Otherwise use the user's default data directory.
path = self.get('electrum_path')
if path is None:
path = self.user_dir()
make_dir(path, allow_symlink=False)
if self.get('testnet'):
path = os.path.join(path, 'testnet')
make_dir(path, allow_symlink=False)
elif self.get('regtest'):
path = os.path.join(path, 'regtest')
make_dir(path, allow_symlink=False)
elif self.get('simnet'):
path = os.path.join(path, 'simnet')
make_dir(path, allow_symlink=False)
elif self.get('signet'):
path = os.path.join(path, 'signet')
make_dir(path, allow_symlink=False)
self.logger.info(f"electrum directory {path}")
return path
def rename_config_keys(self, config, keypairs, deprecation_warning=False):
"""Migrate old key names to new ones"""
updated = False
for old_key, new_key in keypairs.items():
if old_key in config:
if new_key not in config:
config[new_key] = config[old_key]
if deprecation_warning:
self.logger.warning('Note that the {} variable has been deprecated. '
'You should use {} instead.'.format(old_key, new_key))
del config[old_key]
updated = True
return updated
def set_key(self, key, value, save=True):
if not self.is_modifiable(key):
self.logger.warning(f"not changing config key '{key}' set on the command line")
return
try:
json.dumps(key)
json.dumps(value)
except:
self.logger.info(f"json error: cannot save {repr(key)} ({repr(value)})")
return
self._set_key_in_user_config(key, value, save)
def _set_key_in_user_config(self, key, value, save=True):
with self.lock:
if value is not None:
self.user_config[key] = value
else:
self.user_config.pop(key, None)
if save:
self.save_user_config()
def get(self, key, default=None):
with self.lock:
out = self.cmdline_options.get(key)
if out is None:
out = self.user_config.get(key, default)
return out
def _check_dependent_keys(self) -> None:
if self.get('serverfingerprint'):
if not self.get('server'):
raise Exception("config key 'serverfingerprint' requires 'server' to also be set")
self.make_key_not_modifiable('server')
def requires_upgrade(self):
return self.get_config_version() < FINAL_CONFIG_VERSION
def upgrade(self):
with self.lock:
self.logger.info('upgrading config')
self.convert_version_2()
self.convert_version_3()
self.set_key('config_version', FINAL_CONFIG_VERSION, save=True)
def convert_version_2(self):
if not self._is_upgrade_method_needed(1, 1):
return
self.rename_config_keys(self.user_config, {'auto_cycle': 'auto_connect'})
try:
# change server string FROM host:port:proto TO host:port:s
server_str = self.user_config.get('server')
host, port, protocol = str(server_str).rsplit(':', 2)
assert protocol in ('s', 't')
int(port) # Throw if cannot be converted to int
server_str = '{}:{}:s'.format(host, port)
self._set_key_in_user_config('server', server_str)
except BaseException:
self._set_key_in_user_config('server', None)
self.set_key('config_version', 2)
def convert_version_3(self):
if not self._is_upgrade_method_needed(2, 2):
return
base_unit = self.user_config.get('base_unit')
if isinstance(base_unit, str):
self._set_key_in_user_config('base_unit', None)
map_ = {'btc':8, 'mbtc':5, 'ubtc':2, 'bits':2, 'sat':0}
decimal_point = map_.get(base_unit.lower())
self._set_key_in_user_config('decimal_point', decimal_point)
self.set_key('config_version', 3)
def _is_upgrade_method_needed(self, min_version, max_version):
cur_version = self.get_config_version()
if cur_version > max_version:
return False
elif cur_version < min_version:
raise Exception(
('config upgrade: unexpected version %d (should be %d-%d)'
% (cur_version, min_version, max_version)))
else:
return True
def get_config_version(self):
config_version = self.get('config_version', 1)
if config_version > FINAL_CONFIG_VERSION:
self.logger.warning('config version ({}) is higher than latest ({})'
.format(config_version, FINAL_CONFIG_VERSION))
return config_version
def is_modifiable(self, key) -> bool:
return (key not in self.cmdline_options
and key not in self._not_modifiable_keys)
def make_key_not_modifiable(self, key) -> None:
self._not_modifiable_keys.add(key)
def save_user_config(self):
if self.get('forget_config'):
return
if not self.path:
return
path = os.path.join(self.path, "config")
s = json.dumps(self.user_config, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
except FileNotFoundError:
# datadir probably deleted while running...
if os.path.exists(self.path): # or maybe not?
raise
def get_backup_dir(self):
# this is used to save a backup everytime a channel is created
# on Android, the export backup button uses android_backup_dir()
if 'ANDROID_DATA' in os.environ:
return None
else:
return self.get('backup_dir')
def get_wallet_path(self, *, use_gui_last_wallet=False):
"""Set the path of the wallet."""
# command line -w option
if self.get('wallet_path'):
return os.path.join(self.get('cwd', ''), self.get('wallet_path'))
if use_gui_last_wallet:
path = self.get('gui_last_wallet')
if path and os.path.exists(path):
return path
# default path
util.assert_datadir_available(self.path)
dirpath = os.path.join(self.path, "wallets")
make_dir(dirpath, allow_symlink=False)
new_path = os.path.join(self.path, "wallets", "default_wallet")
# default path in pre 1.9 versions
old_path = os.path.join(self.path, "electrum.dat")
if os.path.exists(old_path) and not os.path.exists(new_path):
os.rename(old_path, new_path)
return new_path
def remove_from_recently_open(self, filename):
recent = self.get('recently_open', [])
if filename in recent:
recent.remove(filename)
self.set_key('recently_open', recent)
def set_session_timeout(self, seconds):
self.logger.info(f"session timeout -> {seconds} seconds")
self.set_key('session_timeout', seconds)
def get_session_timeout(self):
return self.get('session_timeout', 300)
def save_last_wallet(self, wallet):
if self.get('wallet_path') is None:
path = wallet.storage.path
self.set_key('gui_last_wallet', path)
def impose_hard_limits_on_fee(func):
def get_fee_within_limits(self, *args, **kwargs):
fee = func(self, *args, **kwargs)
if fee is None:
return fee
fee = min(FEERATE_MAX_DYNAMIC, fee)
fee = max(FEERATE_DEFAULT_RELAY, fee)
return fee
return get_fee_within_limits
def eta_to_fee(self, slider_pos) -> Optional[int]:
"""Returns fee in sat/kbyte."""
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_ETA_TARGETS))
if slider_pos < len(FEE_ETA_TARGETS):
num_blocks = FEE_ETA_TARGETS[int(slider_pos)]
fee = self.eta_target_to_fee(num_blocks)
else:
fee = self.eta_target_to_fee(1)
return fee
@impose_hard_limits_on_fee
def eta_target_to_fee(self, num_blocks: int) -> Optional[int]:
"""Returns fee in sat/kbyte."""
if num_blocks == 1:
fee = self.fee_estimates.get(2)
if fee is not None:
fee += fee / 2
fee = int(fee)
else:
fee = self.fee_estimates.get(num_blocks)
if fee is not None:
fee = int(fee)
return fee
def fee_to_depth(self, target_fee: Real) -> Optional[int]:
"""For a given sat/vbyte fee, returns an estimate of how deep
it would be in the current mempool in vbytes.
Pessimistic == overestimates the depth.
"""
if self.mempool_fees is None:
return None
depth = 0
for fee, s in self.mempool_fees:
depth += s
if fee <= target_fee:
break
return depth
def depth_to_fee(self, slider_pos) -> Optional[int]:
"""Returns fee in sat/kbyte."""
target = self.depth_target(slider_pos)
return self.depth_target_to_fee(target)
@impose_hard_limits_on_fee
def depth_target_to_fee(self, target: int) -> Optional[int]:
"""Returns fee in sat/kbyte.
target: desired mempool depth in vbytes
"""
if self.mempool_fees is None:
return None
depth = 0
for fee, s in self.mempool_fees:
depth += s
if depth > target:
break
else:
return 0
# add one sat/byte as currently that is
# the max precision of the histogram
# (well, in case of ElectrumX at least. not for electrs)
fee += 1
# convert to sat/kbyte
return int(fee * 1000)
def depth_target(self, slider_pos: int) -> int:
"""Returns mempool depth target in bytes for a fee slider position."""
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_DEPTH_TARGETS)-1)
return FEE_DEPTH_TARGETS[slider_pos]
def eta_target(self, slider_pos: int) -> int:
"""Returns 'num blocks' ETA target for a fee slider position."""
if slider_pos == len(FEE_ETA_TARGETS):
return 1
return FEE_ETA_TARGETS[slider_pos]
def fee_to_eta(self, fee_per_kb: Optional[int]) -> int:
"""Returns 'num blocks' ETA estimate for given fee rate,
or -1 for low fee.
"""
import operator
lst = list(self.fee_estimates.items())
next_block_fee = self.eta_target_to_fee(1)
if next_block_fee is not None:
lst += [(1, next_block_fee)]
if not lst or fee_per_kb is None:
return -1
dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), lst)
min_target, min_value = min(dist, key=operator.itemgetter(1))
if fee_per_kb < self.fee_estimates.get(FEE_ETA_TARGETS[0])/2:
min_target = -1
return min_target
def depth_tooltip(self, depth: Optional[int]) -> str:
"""Returns text tooltip for given mempool depth (in vbytes)."""
if depth is None:
return "unknown from tip"
return "%.1f MB from tip" % (depth/1_000_000)
def eta_tooltip(self, x):
if x < 0:
return _('Low fee')
elif x == 1:
return _('In the next block')
else:
return _('Within {} blocks').format(x)
def get_fee_target(self):
dyn = self.is_dynfee()
mempool = self.use_mempool_fees()
pos = self.get_depth_level() if mempool else self.get_fee_level()
fee_rate = self.fee_per_kb()
target, tooltip = self.get_fee_text(pos, dyn, mempool, fee_rate)
return target, tooltip, dyn
def get_fee_status(self):
target, tooltip, dyn = self.get_fee_target()
return tooltip + ' [%s]'%target if dyn else target + ' [Static]'
def get_fee_text(
self,
slider_pos: int,
dyn: bool,
mempool: bool,
fee_per_kb: Optional[int],
):
"""Returns (text, tooltip) where
text is what we target: static fee / num blocks to confirm in / mempool depth
tooltip is the corresponding estimate (e.g. num blocks for a static fee)
fee_rate is in sat/kbyte
"""
if fee_per_kb is None:
rate_str = 'unknown'
fee_per_byte = None
else:
fee_per_byte = fee_per_kb/1000
rate_str = format_fee_satoshis(fee_per_byte) + ' sat/byte'
if dyn:
if mempool:
depth = self.depth_target(slider_pos)
text = self.depth_tooltip(depth)
else:
eta = self.eta_target(slider_pos)
text = self.eta_tooltip(eta)
tooltip = rate_str
else: # using static fees
assert fee_per_kb is not None
assert fee_per_byte is not None
text = rate_str
if mempool and self.has_fee_mempool():
depth = self.fee_to_depth(fee_per_byte)
tooltip = self.depth_tooltip(depth)
elif not mempool and self.has_fee_etas():
eta = self.fee_to_eta(fee_per_kb)
tooltip = self.eta_tooltip(eta)
else:
tooltip = ''
return text, tooltip
def get_depth_level(self):
maxp = len(FEE_DEPTH_TARGETS) - 1
return min(maxp, self.get('depth_level', 2))
def get_fee_level(self):
maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block"
return min(maxp, self.get('fee_level', 2))
def get_fee_slider(self, dyn, mempool) -> Tuple[int, int, Optional[int]]:
if dyn:
if mempool:
pos = self.get_depth_level()
maxp = len(FEE_DEPTH_TARGETS) - 1
fee_rate = self.depth_to_fee(pos)
else:
pos = self.get_fee_level()
maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block"
fee_rate = self.eta_to_fee(pos)
else:
fee_rate = self.fee_per_kb(dyn=False)
pos = self.static_fee_index(fee_rate)
maxp = len(FEERATE_STATIC_VALUES) - 1
return maxp, pos, fee_rate
def static_fee(self, i):
return FEERATE_STATIC_VALUES[i]
def static_fee_index(self, fee_per_kb: Optional[int]) -> int:
if fee_per_kb is None:
raise TypeError('static fee cannot be None')
dist = list(map(lambda x: abs(x - fee_per_kb), FEERATE_STATIC_VALUES))
return min(range(len(dist)), key=dist.__getitem__)
def has_fee_etas(self):
return len(self.fee_estimates) == 4
def has_fee_mempool(self) -> bool:
return self.mempool_fees is not None
def has_dynamic_fees_ready(self):
if self.use_mempool_fees():
return self.has_fee_mempool()
else:
return self.has_fee_etas()
def is_dynfee(self):
return bool(self.get('dynamic_fees', True))
def use_mempool_fees(self):
return bool(self.get('mempool_fees', False))
def _feerate_from_fractional_slider_position(self, fee_level: float, dyn: bool,
mempool: bool) -> Union[int, None]:
fee_level = max(fee_level, 0)
fee_level = min(fee_level, 1)
if dyn:
max_pos = (len(FEE_DEPTH_TARGETS) - 1) if mempool else len(FEE_ETA_TARGETS)
slider_pos = round(fee_level * max_pos)
fee_rate = self.depth_to_fee(slider_pos) if mempool else self.eta_to_fee(slider_pos)
else:
max_pos = len(FEERATE_STATIC_VALUES) - 1
slider_pos = round(fee_level * max_pos)
fee_rate = FEERATE_STATIC_VALUES[slider_pos]
return fee_rate
def fee_per_kb(self, dyn: bool=None, mempool: bool=None, fee_level: float=None) -> Optional[int]:
"""Returns sat/kvB fee to pay for a txn.
Note: might return None.
fee_level: float between 0.0 and 1.0, representing fee slider position
"""
if constants.net is constants.BitcoinRegtest:
return FEERATE_REGTEST_HARDCODED
if dyn is None:
dyn = self.is_dynfee()
if mempool is None:
mempool = self.use_mempool_fees()
if fee_level is not None:
return self._feerate_from_fractional_slider_position(fee_level, dyn, mempool)
# there is no fee_level specified; will use config.
# note: 'depth_level' and 'fee_level' in config are integer slider positions,
# unlike fee_level here, which (when given) is a float in [0.0, 1.0]
if dyn:
if mempool:
fee_rate = self.depth_to_fee(self.get_depth_level())
else:
fee_rate = self.eta_to_fee(self.get_fee_level())
else:
fee_rate = self.get('fee_per_kb', FEERATE_FALLBACK_STATIC_FEE)
if fee_rate is not None:
fee_rate = int(fee_rate)
return fee_rate
def fee_per_byte(self):
"""Returns sat/vB fee to pay for a txn.
Note: might return None.
"""
fee_per_kb = self.fee_per_kb()
return fee_per_kb / 1000 if fee_per_kb is not None else None
def estimate_fee(self, size: Union[int, float, Decimal], *,
allow_fallback_to_static_rates: bool = False) -> int:
fee_per_kb = self.fee_per_kb()
if fee_per_kb is None:
if allow_fallback_to_static_rates:
fee_per_kb = FEERATE_FALLBACK_STATIC_FEE
else:
raise NoDynamicFeeEstimates()
return self.estimate_fee_for_feerate(fee_per_kb, size)
@classmethod
def estimate_fee_for_feerate(cls, fee_per_kb: Union[int, float, Decimal],
size: Union[int, float, Decimal]) -> int:
size = Decimal(size)
fee_per_kb = Decimal(fee_per_kb)
fee_per_byte = fee_per_kb / 1000
# to be consistent with what is displayed in the GUI,
# the calculation needs to use the same precision:
fee_per_byte = quantize_feerate(fee_per_byte)
return round(fee_per_byte * size)
def update_fee_estimates(self, nblock_target: int, fee_per_kb: int):
assert isinstance(nblock_target, int), f"expected int, got {nblock_target!r}"
assert isinstance(fee_per_kb, int), f"expected int, got {fee_per_kb!r}"
self.fee_estimates[nblock_target] = fee_per_kb
def is_fee_estimates_update_required(self):
"""Checks time since last requested and updated fee estimates.
Returns True if an update should be requested.
"""
now = time.time()
return now - self.last_time_fee_estimates_requested > 60
def requested_fee_estimates(self):
self.last_time_fee_estimates_requested = time.time()
def get_video_device(self):
device = self.get("video_device", "default")
if device == 'default':
device = ''
return device
def get_ssl_context(self):
ssl_keyfile = self.get('ssl_keyfile')
ssl_certfile = self.get('ssl_certfile')
if ssl_keyfile and ssl_certfile:
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(ssl_certfile, ssl_keyfile)
return ssl_context
def get_ssl_domain(self):
from .paymentrequest import check_ssl_config
if self.get('ssl_keyfile') and self.get('ssl_certfile'):
SSL_identity = check_ssl_config(self)
else:
SSL_identity = None
return SSL_identity
def get_netaddress(self, key: str) -> Optional[NetAddress]:
text = self.get(key)
if text:
try:
return NetAddress.from_string(text)
except:
pass
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=self.num_zeros,
decimal_point=self.decimal_point,
is_diff=is_diff,
whitespaces=whitespaces,
precision=self.amt_precision_post_satoshi,
)
def format_amount_and_units(self, amount):
return self.format_amount(amount) + ' '+ self.get_base_unit()
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def set_base_unit(self, unit):
assert unit in base_units.keys()
self.decimal_point = base_unit_name_to_decimal_point(unit)
self.set_key('decimal_point', self.decimal_point, True)
def get_decimal_point(self):
return self.decimal_point
def read_user_config(path):
"""Parse and store the user config settings in electrum.conf into user_config[]."""
if not path:
return {}
config_path = os.path.join(path, "config")
if not os.path.exists(config_path):
return {}
try:
with open(config_path, "r", encoding='utf-8') as f:
data = f.read()
result = json.loads(data)
except:
_logger.warning(f"Cannot read config file. {config_path}")
return {}
if not type(result) is dict:
return {}
return result
| 1 | 14,310 | Why change the default value of `is_diff`? | spesmilo-electrum | py |
@@ -37,7 +37,7 @@ readme_note = """
with open('README.rst') as fobj:
long_description = "\n\n" + readme_note + "\n\n" + fobj.read()
-install_requires = ['python-dateutil>=2.7.5,<3']
+install_requires = ['python-dateutil>=2.7.5,<3', 'tenacity>=6.3.0']
# Can't use python-daemon>=2.2.0 if on windows
# See https://pagure.io/python-daemon/issue/18 | 1 | # Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import sys
from setuptools import setup
def get_static_files(path):
return [os.path.join(dirpath.replace("luigi/", ""), ext)
for (dirpath, dirnames, filenames) in os.walk(path)
for ext in ["*.html", "*.js", "*.css", "*.png",
"*.eot", "*.svg", "*.ttf", "*.woff", "*.woff2"]]
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
readme_note = """
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_
"""
with open('README.rst') as fobj:
long_description = "\n\n" + readme_note + "\n\n" + fobj.read()
install_requires = ['python-dateutil>=2.7.5,<3']
# Can't use python-daemon>=2.2.0 if on windows
# See https://pagure.io/python-daemon/issue/18
if sys.platform == 'nt':
install_requires.append('python-daemon<2.2.0')
else:
install_requires.append('python-daemon')
# Start from tornado 6, the minimum supported Python version is 3.5.2.
if sys.version_info[:3] >= (3, 5, 2):
install_requires.append('tornado>=5.0,<7')
else:
install_requires.append('tornado>=5.0,<6')
# Note: To support older versions of setuptools, we're explicitly not
# using conditional syntax (i.e. 'enum34>1.1.0;python_version<"3.4"').
# This syntax is a problem for setuptools as recent as `20.1.1`,
# published Feb 16, 2016.
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34>1.1.0')
if os.environ.get('READTHEDOCS', None) == 'True':
# So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
install_requires.append('sqlalchemy')
# readthedocs don't like python-daemon, see #1342
install_requires = [x for x in install_requires if not x.startswith('python-daemon')]
install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py
# load meta package infos
meta = {}
with open("luigi/__meta__.py", "r") as f:
exec(f.read(), meta)
setup(
name='luigi',
version=meta['__version__'],
description=meta['__doc__'].strip(),
long_description=long_description,
author=meta['__author__'],
url=meta['__contact__'],
license=meta['__license__'],
packages=[
'luigi',
'luigi.configuration',
'luigi.contrib',
'luigi.contrib.hdfs',
'luigi.tools'
],
package_data={
'luigi': luigi_package_data
},
entry_points={
'console_scripts': [
'luigi = luigi.cmdline:luigi_run',
'luigid = luigi.cmdline:luigid',
'luigi-grep = luigi.tools.luigi_grep:main',
'luigi-deps = luigi.tools.deps:main',
'luigi-deps-tree = luigi.tools.deps_tree:main'
]
},
install_requires=install_requires,
extras_require={
'prometheus': ['prometheus-client==0.5.0'],
'toml': ['toml<2.0.0'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: System :: Monitoring',
],
)
| 1 | 19,977 | I think tacking on `,<7` would be a wise precaution in case of future incompatible changes in tenacity. | spotify-luigi | py |
@@ -38,7 +38,7 @@ final class NoDriverManager implements GalleryManagerInterface, MediaManagerInte
* @param int|null $limit
* @param int|null $offset
*/
- public function findBy(array $criteria, ?array $orderBy = null, $limit = null, $offset = null): array
+ public function findBy(array $criteria, ?array $orderBy = null, $limit = null, $offset = null)
{
throw new NoDriverException();
} | 1 | <?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Model;
use Doctrine\DBAL\Connection;
use Sonata\DatagridBundle\Pager\PagerInterface;
use Sonata\MediaBundle\Exception\NoDriverException;
/**
* @internal
*
* @author Andrey F. Mindubaev <[email protected]>
*/
final class NoDriverManager implements GalleryManagerInterface, MediaManagerInterface
{
public function getClass(): string
{
throw new NoDriverException();
}
public function findAll(): array
{
throw new NoDriverException();
}
/**
* @param int|null $limit
* @param int|null $offset
*/
public function findBy(array $criteria, ?array $orderBy = null, $limit = null, $offset = null): array
{
throw new NoDriverException();
}
public function findOneBy(array $criteria, ?array $orderBy = null): object
{
throw new NoDriverException();
}
/**
* @param mixed $id
*/
public function find($id): ?object
{
throw new NoDriverException();
}
public function create(): object
{
throw new NoDriverException();
}
/**
* @param object $entity
* @param bool $andFlush
*/
public function save($entity, $andFlush = true): void
{
throw new NoDriverException();
}
/**
* @param object $entity
* @param bool $andFlush
*/
public function delete($entity, $andFlush = true): void
{
throw new NoDriverException();
}
public function getTableName(): string
{
throw new NoDriverException();
}
public function getConnection(): Connection
{
throw new NoDriverException();
}
public function getPager(array $criteria, int $page, int $limit = 10, array $sort = []): PagerInterface
{
throw new NoDriverException();
}
}
| 1 | 12,209 | I don't think is necessary to remove the return type | sonata-project-SonataMediaBundle | php |
@@ -128,7 +128,11 @@ public class AuthenticatorService extends Service {
final String userId = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_USER_ID), passcodeHash);
final String orgId = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_ORG_ID), passcodeHash);
final String username = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_USERNAME), passcodeHash);
- final String clientSecret = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_CLIENT_SECRET), passcodeHash);
+ final String encClientSecret = mgr.getUserData(account, AuthenticatorService.KEY_CLIENT_SECRET);
+ String clientSecret = null;
+ if (encClientSecret != null) {
+ clientSecret = ForceApp.decryptWithPasscode(encClientSecret, passcodeHash);
+ }
final Bundle resBundle = new Bundle();
try {
final TokenEndpointResponse tr = OAuth2.refreshAuthToken(HttpAccess.DEFAULT, new URI(loginServer), clientId, refreshToken, clientSecret); | 1 | /*
* Copyright (c) 2011, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.auth;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.http.client.ClientProtocolException;
import android.accounts.AbstractAccountAuthenticator;
import android.accounts.Account;
import android.accounts.AccountAuthenticatorResponse;
import android.accounts.AccountManager;
import android.accounts.NetworkErrorException;
import android.app.Service;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.os.IBinder;
import android.util.Log;
import com.salesforce.androidsdk.app.ForceApp;
import com.salesforce.androidsdk.auth.OAuth2.OAuthFailedException;
import com.salesforce.androidsdk.auth.OAuth2.TokenEndpointResponse;
import com.salesforce.androidsdk.rest.ClientManager.LoginOptions;
/**
* The service used for taking care of authentication for a Salesforce-based application.
* See {@link <a href="http://developer.android.com/reference/android/accounts/AbstractAccountAuthenticator.html">AbstractAccountAuthenticator</a>}.
*/
public class AuthenticatorService extends Service {
private static Authenticator authenticator;
// Keys to extra info in the account
public static final String KEY_LOGIN_URL = "loginUrl";
public static final String KEY_INSTANCE_URL = "instanceUrl";
public static final String KEY_USER_ID = "userId";
public static final String KEY_CLIENT_ID = "clientId";
public static final String KEY_ORG_ID = "orgId";
public static final String KEY_USERNAME = "username";
public static final String KEY_ID_URL = "id";
public static final String KEY_CLIENT_SECRET = "clientSecret";
private Authenticator getAuthenticator() {
if (authenticator == null)
authenticator = new Authenticator(this);
return authenticator;
}
@Override
public IBinder onBind(Intent intent) {
if (intent.getAction().equals(AccountManager.ACTION_AUTHENTICATOR_INTENT))
return getAuthenticator().getIBinder();
return null;
}
/**
* The Authenticator for salesforce accounts.
* - addAccount Start the login flow (by launching the activity filtering the salesforce.intent.action.LOGIN intent).
* - getAuthToken Refresh the token by calling {@link OAuth2#refreshAuthToken(HttpAccess, URI, String, String) OAuth2.refreshAuthToken}.
*/
private static class Authenticator extends AbstractAccountAuthenticator {
private final Context context;
Authenticator(Context ctx) {
super(ctx);
this.context = ctx;
}
@Override
public Bundle addAccount(
AccountAuthenticatorResponse response,
String accountType,
String authTokenType,
String[] requiredFeatures,
Bundle options)
throws NetworkErrorException {
Log.i("Authenticator:addAccount", "Options: " + options);
return makeAuthIntentBundle(response, options);
}
/**
* Uses the refresh token to get a new access token.
* Remember that the authenticator runs under its own separate process, so if you want to debug you
* need to attach to the :auth process, and not the main chatter process.
*/
@Override
public Bundle getAuthToken(
AccountAuthenticatorResponse response,
Account account,
String authTokenType,
Bundle options) throws NetworkErrorException {
Log.i("Authenticator:getAuthToken", "Get auth token for " + account.name);
final AccountManager mgr = AccountManager.get(context);
final String passcodeHash = LoginOptions.fromBundle(options).passcodeHash;
final String refreshToken = ForceApp.decryptWithPasscode(mgr.getPassword(account), passcodeHash);
final String loginServer = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_LOGIN_URL), passcodeHash);
final String clientId = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_CLIENT_ID), passcodeHash);
final String instServer = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_INSTANCE_URL), passcodeHash);
final String userId = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_USER_ID), passcodeHash);
final String orgId = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_ORG_ID), passcodeHash);
final String username = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_USERNAME), passcodeHash);
final String clientSecret = ForceApp.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_CLIENT_SECRET), passcodeHash);
final Bundle resBundle = new Bundle();
try {
final TokenEndpointResponse tr = OAuth2.refreshAuthToken(HttpAccess.DEFAULT, new URI(loginServer), clientId, refreshToken, clientSecret);
// Handle the case where the org has been migrated to a new instance, or has turned on my domains.
if (!instServer.equalsIgnoreCase(tr.instanceUrl)) {
mgr.setUserData(account, AuthenticatorService.KEY_INSTANCE_URL, ForceApp.encryptWithPasscode(tr.instanceUrl, passcodeHash));
}
// Update auth token in account.
mgr.setUserData(account, AccountManager.KEY_AUTHTOKEN, ForceApp.encryptWithPasscode(tr.authToken, passcodeHash));
resBundle.putString(AccountManager.KEY_ACCOUNT_NAME, account.name);
resBundle.putString(AccountManager.KEY_ACCOUNT_TYPE, account.type);
resBundle.putString(AccountManager.KEY_AUTHTOKEN, ForceApp.encryptWithPasscode(tr.authToken, passcodeHash));
resBundle.putString(AuthenticatorService.KEY_LOGIN_URL, ForceApp.encryptWithPasscode(loginServer, passcodeHash));
resBundle.putString(AuthenticatorService.KEY_INSTANCE_URL, ForceApp.encryptWithPasscode(instServer, passcodeHash));
resBundle.putString(AuthenticatorService.KEY_CLIENT_ID, ForceApp.encryptWithPasscode(clientId, passcodeHash));
resBundle.putString(AuthenticatorService.KEY_USERNAME, ForceApp.encryptWithPasscode(username, passcodeHash));
resBundle.putString(AuthenticatorService.KEY_USER_ID, ForceApp.encryptWithPasscode(userId, passcodeHash));
resBundle.putString(AuthenticatorService.KEY_ORG_ID, ForceApp.encryptWithPasscode(orgId, passcodeHash));
resBundle.putString(AuthenticatorService.KEY_CLIENT_SECRET, ForceApp.encryptWithPasscode(clientSecret, passcodeHash));
Log.i("Authenticator:getAuthToken", "Returning auth bundle for " + account.name);
} catch (ClientProtocolException e) {
Log.w("Authenticator:getAuthToken", "", e);
throw new NetworkErrorException(e);
} catch (IOException e) {
Log.w("Authenticator:getAuthToken", "", e);
throw new NetworkErrorException(e);
} catch (URISyntaxException e) {
Log.w("Authenticator:getAuthToken", "", e);
throw new NetworkErrorException(e);
} catch (OAuthFailedException e) {
if (e.isRefreshTokenInvalid()) {
// the exception explicitly indicates that the refresh token is no longer valid.
return makeAuthIntentBundle(response, options);
}
resBundle.putString(AccountManager.KEY_ERROR_CODE, e.response.error);
resBundle.putString(AccountManager.KEY_ERROR_MESSAGE, e.response.errorDescription);
}
Log.i("Authenticator:getAuthToken", "Result: " + resBundle);
return resBundle;
}
/**
* Return bundle with intent to start the login flow.
*
* @param response
* @param options
* @return
*/
private Bundle makeAuthIntentBundle(AccountAuthenticatorResponse response, Bundle options) {
Bundle reply = new Bundle();
Intent i = new Intent(context, ForceApp.APP.getLoginActivityClass());
i.setFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP);
i.putExtra(AccountManager.KEY_ACCOUNT_AUTHENTICATOR_RESPONSE, response);
if (options != null)
i.putExtras(options);
reply.putParcelable(AccountManager.KEY_INTENT, i);
return reply;
}
@Override
public Bundle updateCredentials(AccountAuthenticatorResponse response, Account account, String authTokenType, Bundle options) throws NetworkErrorException {
return null;
}
@Override
public Bundle confirmCredentials(AccountAuthenticatorResponse response, Account account, Bundle options) throws NetworkErrorException {
return null;
}
@Override
public Bundle editProperties(AccountAuthenticatorResponse response, String accountType) {
return null;
}
@Override
public String getAuthTokenLabel(String authTokenType) {
return null;
}
@Override
public Bundle hasFeatures(AccountAuthenticatorResponse response, Account account, String[] features) throws NetworkErrorException {
return null;
}
}
}
| 1 | 13,339 | Also added this check, since client secret is not used except in the IP bypass scenario. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -151,6 +151,8 @@ const baseActions = {
return;
}
+ // Clear any profile ID selection in the case that selection falls to the getProfiles resolver.
+ registry.dispatch( STORE_NAME ).setProfileID( '' );
registry.dispatch( STORE_NAME ).setPropertyID( propertyID );
if ( PROPERTY_CREATE === propertyID ) { | 1 | /**
* `modules/analytics` data store: properties.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import invariant from 'invariant';
/**
* Internal dependencies
*/
import API from 'googlesitekit-api';
import Data from 'googlesitekit-data';
import { isValidAccountID, isValidPropertyID, parsePropertyID, isValidPropertySelection } from '../util';
import { STORE_NAME, PROPERTY_CREATE, PROFILE_CREATE } from './constants';
import { createFetchStore } from '../../../googlesitekit/data/create-fetch-store';
const { createRegistrySelector, createRegistryControl } = Data;
const fetchGetPropertiesProfilesStore = createFetchStore( {
baseName: 'getPropertiesProfiles',
controlCallback: ( { accountID } ) => {
return API.get( 'modules', 'analytics', 'properties-profiles', { accountID }, {
useCache: false,
} );
},
reducerCallback: ( state, response, { accountID } ) => {
// Actual properties, profiles are set by resolver with custom logic,
// hence here we just set a flag.
return {
...state,
isAwaitingPropertiesProfilesCompletion: {
...state.isAwaitingPropertiesProfilesCompletion,
[ accountID ]: true,
},
};
},
argsToParams: ( accountID ) => {
return { accountID };
},
validateParams: ( { accountID } = {} ) => {
invariant( accountID, 'accountID is required.' );
},
} );
const fetchCreatePropertyStore = createFetchStore( {
baseName: 'createProperty',
controlCallback: ( { accountID } ) => {
return API.set( 'modules', 'analytics', 'create-property', { accountID } );
},
reducerCallback: ( state, property, { accountID } ) => {
return {
...state,
properties: {
...state.properties,
[ accountID ]: [
...( state.properties[ accountID ] || [] ),
property,
],
},
};
},
argsToParams: ( accountID ) => {
return { accountID };
},
validateParams: ( { accountID } = {} ) => {
invariant( accountID, 'accountID is required.' );
},
} );
// Actions
const RECEIVE_MATCHED_PROPERTY = 'RECEIVE_MATCHED_PROPERTY';
const RECEIVE_GET_PROPERTIES = 'RECEIVE_GET_PROPERTIES';
const RECEIVE_PROPERTIES_PROFILES_COMPLETION = 'RECEIVE_PROPERTIES_PROFILES_COMPLETION';
const WAIT_FOR_PROPERTIES = 'WAIT_FOR_PROPERTIES';
const baseInitialState = {
properties: {},
isAwaitingPropertiesProfilesCompletion: {},
matchedProperty: undefined,
};
const baseActions = {
/**
* Creates a new Analytics property.
*
* Creates a new Analytics property for an existing Google Analytics account.
*
* @since 1.8.0
*
* @param {string} accountID Google Analytics account ID.
* @return {Object} Object with `response` and `error`.
*/
*createProperty( accountID ) {
invariant( accountID, 'accountID is required.' );
const { response, error } = yield fetchCreatePropertyStore.actions.fetchCreateProperty( accountID );
return { response, error };
},
/**
* Adds a matchedProperty to the store.
*
* @since 1.8.0
* @private
*
* @param {Object} matchedProperty Property object.
* @return {Object} Redux-style action.
*/
receiveMatchedProperty( matchedProperty ) {
invariant( matchedProperty, 'matchedProperty is required.' );
return {
payload: { matchedProperty },
type: RECEIVE_MATCHED_PROPERTY,
};
},
/**
* Sets the given property and related fields in the store.
*
* @since 1.8.0
* @private
*
* @param {string} propertyID Property ID to select.
* @param {string} [internalPropertyID] Internal property ID (if available).
* @return {Object} A Generator function.
*/
selectProperty( propertyID, internalPropertyID = '' ) {
invariant( isValidPropertySelection( propertyID ), 'A valid propertyID selection is required.' );
return ( function* () {
const registry = yield Data.commonActions.getRegistry();
const accountID = registry.select( STORE_NAME ).getAccountID();
if ( ! isValidAccountID( accountID ) ) {
return;
}
registry.dispatch( STORE_NAME ).setPropertyID( propertyID );
if ( PROPERTY_CREATE === propertyID ) {
registry.dispatch( STORE_NAME ).setProfileID( PROFILE_CREATE );
return;
}
yield baseActions.waitForProperties( accountID );
const property = registry.select( STORE_NAME ).getPropertyByID( propertyID ) || {};
if ( ! internalPropertyID ) {
internalPropertyID = property.internalWebPropertyId; // eslint-disable-line sitekit/camelcase-acronyms
}
registry.dispatch( STORE_NAME ).setInternalWebPropertyID( internalPropertyID || '' );
// Clear any profile ID selection in the case that selection falls to the getProfiles resolver.
registry.dispatch( STORE_NAME ).setProfileID( '' );
const profiles = registry.select( STORE_NAME ).getProfiles( accountID, propertyID );
if ( property.defaultProfileId && profiles?.some( ( profile ) => profile.id === property.defaultProfileId ) ) { // eslint-disable-line sitekit/camelcase-acronyms
registry.dispatch( STORE_NAME ).setProfileID( property.defaultProfileId ); // eslint-disable-line sitekit/camelcase-acronyms
return;
}
if ( profiles === undefined ) {
return; // Selection will happen in in getProfiles resolver.
}
const matchedProfile = profiles.find( ( { webPropertyId } ) => webPropertyId === propertyID ) || { id: PROFILE_CREATE }; // eslint-disable-line sitekit/camelcase-acronyms
registry.dispatch( STORE_NAME ).setProfileID( matchedProfile.id );
}() );
},
receiveGetProperties( properties, { accountID } ) {
invariant( Array.isArray( properties ), 'properties must be an array.' );
invariant( accountID, 'accountID is required.' );
return {
payload: { properties, accountID },
type: RECEIVE_GET_PROPERTIES,
};
},
receivePropertiesProfilesCompletion( accountID ) {
invariant( accountID, 'accountID is required.' );
return {
payload: { accountID },
type: RECEIVE_PROPERTIES_PROFILES_COMPLETION,
};
},
waitForProperties( accountID ) {
return {
payload: { accountID },
type: WAIT_FOR_PROPERTIES,
};
},
};
const baseControls = {
[ WAIT_FOR_PROPERTIES ]: createRegistryControl( ( registry ) => ( { payload: { accountID } } ) => {
const arePropertiesLoaded = () => registry.select( STORE_NAME ).getProperties( accountID ) !== undefined;
if ( arePropertiesLoaded() ) {
return true;
}
return new Promise( ( resolve ) => {
const unsubscribe = registry.subscribe( () => {
if ( arePropertiesLoaded() ) {
unsubscribe();
resolve();
}
} );
} );
} ),
};
const baseReducer = ( state, { type, payload } ) => {
switch ( type ) {
case RECEIVE_MATCHED_PROPERTY: {
const { matchedProperty } = payload;
return {
...state,
matchedProperty,
};
}
case RECEIVE_GET_PROPERTIES: {
const { properties, accountID } = payload;
return {
...state,
properties: {
...state.properties,
[ accountID ]: [ ...properties ],
},
};
}
case RECEIVE_PROPERTIES_PROFILES_COMPLETION: {
const { accountID } = payload;
return {
...state,
isAwaitingPropertiesProfilesCompletion: {
...state.isAwaitingPropertiesProfilesCompletion,
[ accountID ]: false,
},
};
}
default: {
return state;
}
}
};
const baseResolvers = {
*getProperties( accountID ) {
if ( ! isValidAccountID( accountID ) ) {
return;
}
const registry = yield Data.commonActions.getRegistry();
let properties = registry.select( STORE_NAME ).getProperties( accountID );
// Only fetch properties if there are none in the store for the given account.
if ( properties === undefined ) {
const { response, error } = yield fetchGetPropertiesProfilesStore.actions.fetchGetPropertiesProfiles( accountID );
const { dispatch } = registry;
if ( response ) {
dispatch( STORE_NAME ).receiveGetProperties( response.properties, { accountID } );
// eslint-disable-next-line sitekit/camelcase-acronyms
if ( response.profiles?.[ 0 ]?.webPropertyId ) {
// eslint-disable-next-line sitekit/camelcase-acronyms
const propertyID = response.profiles[ 0 ].webPropertyId;
dispatch( STORE_NAME ).receiveGetProfiles( response.profiles, { accountID, propertyID } );
}
if ( response.matchedProperty ) {
dispatch( STORE_NAME ).receiveMatchedProperty( response.matchedProperty );
}
( { properties } = response );
}
dispatch( STORE_NAME ).receivePropertiesProfilesCompletion( accountID );
if ( error ) {
// Store error manually since getProperties signature differs from fetchGetPropertiesProfiles.
yield dispatch( STORE_NAME ).receiveError( error, 'getProperties', [ accountID ] );
return;
}
}
const propertyID = registry.select( STORE_NAME ).getPropertyID();
if ( ! propertyID ) {
const property = properties[ 0 ] || { id: PROPERTY_CREATE };
yield baseActions.selectProperty( property.id, property.internalWebPropertyId ); // eslint-disable-line sitekit/camelcase-acronyms
}
},
};
const baseSelectors = {
/**
* Gets the property object by the property ID.
*
* @since 1.8.0
* @private
*
* @param {Object} state Data store's state.
* @param {string} propertyID Property ID.
* @return {(Object|undefined)} Property object, or undefined if not present in store.
*/
getPropertyByID( state, propertyID ) {
if ( ! isValidPropertyID( propertyID ) ) {
return undefined;
}
const { accountID } = parsePropertyID( propertyID );
return ( state.properties[ accountID ] || [] ).find( ( { id } ) => id === propertyID );
},
/**
* Gets the matched property, if any.
*
* @since 1.8.0
* @private
*
* @param {Object} state Data store's state.
* @return {(Object|undefined)} Matched property if set, otherwise `undefined`.
*/
getMatchedProperty( state ) {
return state.matchedProperty;
},
/**
* Gets all Google Analytics properties this account can access.
*
* Returns an array of all analytics properties.
*
* Returns `undefined` if accounts have not yet loaded.
*
* @since 1.8.0
*
* @param {Object} state Data store's state.
* @param {string} accountID The Analytics Account ID to fetch properties for.
* @return {(Array.<Object>|undefined)} An array of Analytics properties; `undefined` if not loaded.
*/
getProperties( state, accountID ) {
const { properties } = state;
return properties[ accountID ];
},
/**
* Checks if a property is being created for an account.
*
* @since 1.8.0
*
* @param {Object} state Data store's state.
* @param {string} accountID The Analytics Account ID to check for property creation.
* @return {boolean} `true` if creating a property, `false` if not.
*/
isDoingCreateProperty: createRegistrySelector( ( select ) => ( state, accountID ) => {
return select( STORE_NAME ).isFetchingCreateProperty( accountID );
} ),
/**
* Checks if properties are being fetched for the given account.
*
* @since 1.8.0
*
* @param {Object} state Data store's state.
* @param {string} accountID The Analytics Account ID to check for property creation.
* @return {boolean} `true` if fetching a properties, `false` if not.
*/
isDoingGetProperties: createRegistrySelector( ( select ) => ( state, accountID ) => {
// Check if dispatch calls right after fetching are still awaiting.
if ( accountID && state.isAwaitingPropertiesProfilesCompletion[ accountID ] ) {
return true;
}
return select( STORE_NAME ).isFetchingGetPropertiesProfiles( accountID );
} ),
};
const store = Data.combineStores(
fetchGetPropertiesProfilesStore,
fetchCreatePropertyStore,
{
initialState: baseInitialState,
actions: baseActions,
controls: baseControls,
reducer: baseReducer,
resolvers: baseResolvers,
selectors: baseSelectors,
}
);
export const initialState = store.initialState;
export const actions = store.actions;
export const controls = store.controls;
export const reducer = store.reducer;
export const resolvers = store.resolvers;
export const selectors = store.selectors;
export default store;
| 1 | 33,549 | That's unrelated, but shouldn't there also be a call to `setInternalWebPropertyID`, making it empty, for this case? Otherwise, when selecting to create a new property, any previous internal web property ID will still be in state, potentially causing problems. | google-site-kit-wp | js |
@@ -37,7 +37,6 @@ func TestDevLogs(t *testing.T) {
out, err := system.RunCommand(DdevBin, args)
assert.NoError(err)
assert.Contains(string(out), "Server started")
- assert.Contains(string(out), "GET")
cleanup()
} | 1 | package cmd
import (
"testing"
"os"
"github.com/drud/ddev/pkg/testcommon"
"github.com/drud/drud-go/utils/system"
"github.com/stretchr/testify/assert"
)
func TestDevLogsBadArgs(t *testing.T) {
assert := assert.New(t)
testDir := testcommon.CreateTmpDir("no-valid-ddev-config")
err := os.Chdir(testDir)
if err != nil {
t.Skip("Could not change to temporary directory %s: %v", testDir, err)
}
args := []string{"logs"}
out, err := system.RunCommand(DdevBin, args)
assert.Error(err)
assert.Contains(string(out), "unable to determine the application for this command")
}
// TestDevLogs tests that the Dev logs functionality is working.
func TestDevLogs(t *testing.T) {
assert := assert.New(t)
for _, v := range DevTestSites {
cleanup := v.Chdir()
args := []string{"logs"}
out, err := system.RunCommand(DdevBin, args)
assert.NoError(err)
assert.Contains(string(out), "Server started")
assert.Contains(string(out), "GET")
cleanup()
}
}
| 1 | 11,068 | I wonder if we should trigger a PHP error and ensure it ends up in the log? | drud-ddev | php |
@@ -247,6 +247,16 @@ describe TopicsController do
topic.closed.must_equal false
end
+ # TODO: Will need to figure out how javascript will affect the testing.
+ it 'admin can get to move_topic page' do
+ login_as admin
+ get :move_topic, id: topic.id
+ must_respond_with :success
+ must_render_template 'topics/move'
+ end
+
+ # TODO: This test is off, verify the behavior
+ # TODO: Will need to figure out how javascript will affect the testing.
it 'admin can move a topic' do
different_topic = create(:topic)
login_as admin | 1 | require 'test_helper'
describe TopicsController do
let(:forum) { create(:forum) }
let(:user) { create(:account) }
let(:admin) { create(:admin) }
let(:topic) { create(:topic) }
let(:topic_post) { create(:post) }
#-------------User with no account---------------
it 'index' do
get :index, forum_id: forum.id
must_redirect_to forum_path(forum)
end
it 'new' do
get :new, forum_id: forum.id
must_respond_with :unauthorized
end
it 'create should fail if not signed in' do
assert_no_difference(['Topic.count', 'Post.count']) do
post :create, forum_id: forum.id, topic: { title: 'Example Topic title', posts_attributes:
[{ body: 'Example Post body', account_id: nil }] }
end
must_respond_with :unauthorized
end
it 'show with post pagination' do
create_list(:post, 31)
get :show, id: topic.id
must_respond_with :success
# There should be 25 posts max per page
css_select 'html body div#page.container div#page-contents div#topics_show_page.col-md-13 div.span12 div', 25
end
it 'edit' do
get :edit, id: topic.id
must_respond_with :unauthorized
end
it 'update' do
put :update, id: topic.id, topic: { title: 'Changed title for test purposes' }
topic.reload
topic.title.must_equal topic.title
end
it 'destroy' do
topic2 = create(:topic)
assert_no_difference('Topic.count') do
delete :destroy, id: topic2.id
end
end
# #--------------Basic User ----------------------
it 'user index' do
login_as user
get :index, forum_id: forum.id
must_redirect_to forum_path(forum)
end
it 'user new' do
login_as user
get :new, forum_id: forum.id
must_respond_with :success
end
it 'user create a topic and an accompanying post' do
login_as(user)
assert_difference(['Topic.count', 'Post.count']) do
post :create, forum_id: forum.id, topic: { title: 'Example Forum', posts_attributes:
[{ body: 'Post object that comes by default' }] }
end
must_redirect_to forum_path(forum.id)
flash[:notice].must_equal 'Topic successfully created'
end
it 'user fails to create a topic and an accompanying post' do
login_as(user)
assert_no_difference(['Topic.count', 'Post.count']) do
post :create, forum_id: forum.id, topic: { title: '', posts_attributes:
[{ body: '' }] }
end
must_render_template :new
end
test 'user creates a topic/post with valid recaptcha' do
# TODO: Message might be changed
login_as(user)
TopicsController.any_instance.expects(:verify_recaptcha).returns(true)
assert_difference(['Topic.count', 'Post.count']) do
post :create, forum_id: forum.id, topic: { title: 'Example Forum', posts_attributes:
[{ body: 'Post object that comes by default' }] }
end
must_redirect_to forum_path(forum.id)
flash[:notice].must_equal 'Topic successfully created'
end
test 'user fails to create a topic/post because of invalid recaptcha' do
# TODO: message might be changed.
login_as(user)
TopicsController.any_instance.expects(:verify_recaptcha).returns(false)
assert_no_difference(['Topic.count', 'Post.count']) do
post :create, forum_id: forum.id, topic: { title: 'Example Forum', posts_attributes:
[{ body: 'Post object that comes by default' }] }
end
end
it 'user show with post pagination' do
create_list(:post, 31)
login_as user
get :show, id: topic.id
must_respond_with :success
# There should be 25 posts max per page
css_select 'html body div#page.container div#page-contents div#topics_show_page.col-md-13 div.span12 div', 25
end
it 'user edit' do
login_as user
get :edit, id: topic.id
must_respond_with :unauthorized
end
it 'user update' do
login_as user
put :update, id: topic.id, topic: { title: 'Changed title for test purposes' }
topic.reload
topic.title.must_equal topic.title
must_respond_with :unauthorized
end
it 'user destroy' do
login_as user
topic2 = create(:topic)
assert_no_difference('Topic.count') do
post :destroy, id: topic2.id
end
must_respond_with :unauthorized
end
# #-----------Admin Account------------------------
it 'admin index' do
login_as admin
get :index, forum_id: forum.id
must_redirect_to forum_path(forum)
end
it 'admin new' do
login_as admin
get :new, forum_id: forum.id
must_respond_with :success
end
it 'admin create a topic and accompanying post' do
login_as(admin)
assert_difference(['Topic.count', 'Post.count']) do
post :create, forum_id: forum.id, topic: { title: 'Example Topic', posts_attributes:
[{ body: 'Post object that comes by default' }] }
end
must_redirect_to forum_path(forum.id)
flash[:notice].must_equal 'Topic successfully created'
end
it 'admin fails create a topic and an accompanying post' do
login_as(admin)
assert_no_difference(['Topic.count', 'Post.count']) do
post :create, forum_id: forum.id, topic: { title: '', posts_attributes:
[{ body: '' }] }
end
must_render_template :new
end
it 'admin creates a topic/post with valid recaptcha' do
# TODO: Message might change
login_as(admin)
TopicsController.any_instance.expects(:verify_recaptcha).returns(true)
assert_difference(['Topic.count', 'Post.count']) do
post :create, forum_id: forum.id, topic: { title: 'Example Forum', posts_attributes:
[{ body: 'Post object that comes by default' }] }
end
assert_redirected_to forum_path(forum.id)
end
it 'admin fails to create a topic/post because of invalid recaptcha' do
# TODO: Message might change.
login_as(admin)
TopicsController.any_instance.expects(:verify_recaptcha).returns(false)
assert_no_difference(['Topic.count', 'Post.count']) do
post :create, forum_id: forum.id, topic: { title: 'Example Forum', posts_attributes:
[{ body: 'Post object that comes by default' }] }
end
end
it 'admin show with post pagination' do
create_list(:post, 26)
get :show, id: topic.id
must_respond_with :success
# There should be 25 posts max per page
css_select 'html body div#page.container div#page-contents div#topics_show_page.col-md-13 div.span12 div', 25
end
it 'admin edit' do
login_as admin
get :edit, id: topic.id
must_respond_with :success
end
it 'admin update' do
login_as admin
put :update, id: topic.id, topic: { title: 'Changed title for test purposes' }
topic.reload
topic.title.must_equal 'Changed title for test purposes'
flash[:notice].must_equal 'Topic updated'
end
it 'admin fails to update' do
login_as admin
put :update, id: topic.id, topic: { title: '' }
topic.reload
topic.title.must_equal topic.title
flash[:notice].must_equal 'Sorry, there was a problem updating the topic'
end
it 'admin destroy' do
login_as admin
topic2 = create(:topic)
assert_difference('Topic.count', -1) do
delete :destroy, id: topic2.id
end
must_redirect_to forums_path
end
it 'admin can close a topic' do
# TODO: Will need to change the message for this
login_as admin
put :update, id: topic.id, topic: { closed: true }
topic.reload
topic.closed.must_equal true
end
it 'admin can reopen a topic' do
# TODO: Will need to change the message for this
login_as admin
topic.closed = true
topic.reload
put :update, id: topic.id, topic: { closed: false }
topic.closed.must_equal false
end
it 'admin can move a topic' do
different_topic = create(:topic)
login_as admin
put :update, id: topic.id, topic: { forum_id: different_topic.forum_id }
topic.reload
topic.forum_id.must_equal different_topic.forum_id
end
end
| 1 | 7,017 | The `move_topic` action be a `post` request since it changes the state of the object. | blackducksoftware-ohloh-ui | rb |
@@ -366,6 +366,9 @@ func doTestResults(tid int, state *core.BuildState, target *core.BuildTarget, ru
if runRemotely {
metadata, err = state.RemoteClient.Test(tid, target, run)
+ if metadata == nil {
+ metadata = new(core.BuildMetadata)
+ }
} else {
var stdout []byte
stdout, err = prepareAndRunTest(tid, state, target, run) | 1 | package test
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"sync/atomic"
"time"
"gopkg.in/op/go-logging.v1"
"github.com/thought-machine/please/src/build"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
"github.com/thought-machine/please/src/worker"
)
// Logger for this package.
var log = logging.MustGetLogger("test")

// Placeholder contents for the test results / coverage files, passed to
// moveOutputFile — presumably written as stand-in content when a test produces
// no real file of its own; TODO(review): confirm against moveOutputFile.
const dummyOutput = "=== RUN DummyTest\n--- PASS: DummyTest (0.00s)\nPASS\n"
const dummyCoverage = "<?xml version=\"1.0\" ?><coverage></coverage>"

// Tag that we attach for xattrs to store hashes against files.
// Note that we are required to provide the user namespace; that seems to be set implicitly
// by the attr utility, but that is not done for us here.
const xattrName = "user.plz_test"

// Total number of failed test-result uploads so far; incremented atomically in Test's defer.
var numUploadFailures int64

// Once this many uploads have failed we give up and stop attempting further uploads.
const maxUploadFailures int64 = 10
// Test runs the tests for a single target identified by label, recording the run as
// complete afterwards and uploading the results if an upload endpoint is configured.
func Test(tid int, state *core.BuildState, label core.BuildLabel, remote bool, run int) {
	target := state.Graph.TargetOrDie(label)
	// Defer this so that no matter what happens in this test run, we always call target.CompleteRun
	defer func() {
		runsAllCompleted := target.CompleteRun(state)
		if runsAllCompleted && state.Config.Test.Upload != "" {
			// Load atomically: other test goroutines may be incrementing this counter
			// concurrently via the AddInt64 below, so a plain read is a data race.
			if atomic.LoadInt64(&numUploadFailures) < maxUploadFailures {
				if err := uploadResults(target, state.Config.Test.Upload.String()); err != nil {
					log.Warning("%s", err)
					if atomic.AddInt64(&numUploadFailures, 1) >= maxUploadFailures {
						log.Error("Failed to upload test results %d times, giving up", maxUploadFailures)
					}
				}
			}
		}
	}()
	state.LogBuildResult(tid, label, core.TargetTesting, "Testing...")
	test(tid, state.ForTarget(target), label, target, remote, run)
}
// test performs the test run(s) for a target and records the results on it.
// It short-circuits via cached results when possible, supports --shell (prepare
// only), and handles three execution modes: a single run with flaky retries,
// sequential repeated runs, and one run of a set of parallel runs.
func test(tid int, state *core.BuildState, label core.BuildLabel, target *core.BuildTarget, runRemotely bool, run int) {
	hash, err := runtimeHash(state, target, runRemotely, run)
	if err != nil {
		state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to calculate target hash")
		return
	}

	outputFile := path.Join(target.TestDir(run), core.TestResultsFile)
	coverageFile := path.Join(target.TestDir(run), core.CoverageFile)
	needCoverage := target.NeedCoverage(state)

	// If the user passed --shell then just prepare the directory.
	if state.PrepareShell {
		if err := prepareTestDir(state.Graph, target, run); err != nil {
			state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to prepare test directory")
		} else {
			target.SetState(core.Stopped)
			state.LogBuildResult(tid, label, core.TargetTestStopped, "Test stopped")
		}
		return
	}

	// cachedTestResults loads previously stored results & coverage. It returns nil if
	// they are unusable (unparseable, or containing failures), cleaning the cache entry.
	cachedTestResults := func() *core.TestSuite {
		log.Debug("Not re-running test %s; got cached results.", label)
		coverage := parseCoverageFile(target, target.CoverageFile(), run)
		results, err := parseTestResultsFile(target.TestResultsFile())
		results.Package = strings.Replace(target.Label.PackageName, "/", ".", -1)
		results.Name = target.Label.Name
		results.Cached = true
		if err != nil {
			// %s, not %w: this is a logging call, not fmt.Errorf, so %w is not a valid
			// verb here and would render as %!w(...).
			log.Warningf("Failed to parse cached test file (for %v), Rerunning test. %s", target.Label, err)
			state.Cache.Clean(target)
			return nil
		} else if !results.TestCases.AllSucceeded() {
			log.Warning("Test results (for %s) with failures shouldn't be cached - ignoring.", label)
			state.Cache.Clean(target)
			return nil
		} else {
			logTestSuccess(state, tid, label, &results, coverage)
		}
		return &results
	}

	// moveAndCacheOutputFiles moves results, coverage & declared test outputs out of the
	// test dir and stores them in the cache. Returns false if a move failed (already logged).
	moveAndCacheOutputFiles := func(results *core.TestSuite, coverage *core.TestCoverage) bool {
		// Never cache test results when given arguments; the results may be incomplete.
		if len(state.TestArgs) > 0 {
			log.Debug("Not caching results for %s, we passed it arguments", label)
			return true
		}
		// Never cache test results if there were failures (usually flaky tests).
		if results.Failures() > 0 {
			log.Debug("Not caching results for %s, test had failures", label)
			return true
		}
		outs := []string{path.Base(target.TestResultsFile())}
		if err := moveOutputFile(state, hash, outputFile, target.TestResultsFile(), dummyOutput); err != nil {
			state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test output file")
			return false
		}
		if needCoverage || core.PathExists(coverageFile) {
			if err := moveOutputFile(state, hash, coverageFile, target.CoverageFile(), dummyCoverage); err != nil {
				state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test coverage file")
				return false
			}
			outs = append(outs, path.Base(target.CoverageFile()))
		}
		for _, output := range target.TestOutputs {
			tmpFile := path.Join(target.TestDir(run), output)
			outFile := path.Join(target.OutDir(), output)
			if err := moveOutputFile(state, hash, tmpFile, outFile, ""); err != nil {
				state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test output file")
				return false
			}
			outs = append(outs, output)
		}
		if state.Cache != nil && !runRemotely {
			state.Cache.Store(target, hash, outs)
		}
		return true
	}

	// needToRun reports whether the test must actually be executed, i.e. we can't
	// reuse existing on-disk results (hash-verified) or retrieve them from the cache.
	needToRun := func() bool {
		if state.ForceRerun {
			return true
		}
		if s := target.State(); (s == core.Unchanged || s == core.Reused) && core.PathExists(target.TestResultsFile()) {
			// Output file exists already and appears to be valid. We might still need to rerun though
			// if the coverage files aren't available.
			if needCoverage && !verifyHash(state, target.CoverageFile(), hash) {
				log.Debug("Rerunning %s, coverage file doesn't exist or has wrong hash", target.Label)
				return true
			} else if !verifyHash(state, target.TestResultsFile(), hash) {
				log.Debug("Rerunning %s, results file has incorrect hash", target.Label)
				return true
			}
			return false
		}
		log.Debug("Output file %s does not exist for %s", target.TestResultsFile(), target.Label)
		// Check the cache for these artifacts.
		files := []string{path.Base(target.TestResultsFile())}
		if needCoverage {
			files = append(files, path.Base(target.CoverageFile()))
		}
		return state.Cache == nil || !state.Cache.Retrieve(target, hash, files)
	}

	// Don't cache when doing multiple runs, presumably the user explicitly wants to check it.
	if state.NumTestRuns == 1 && !runRemotely && !needToRun() {
		if cachedResults := cachedTestResults(); cachedResults != nil {
			target.Results = *cachedResults
			return
		}
	}

	// Remove any cached test result file.
	if err := RemoveTestOutputs(target); err != nil {
		state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to remove test output files")
		return
	}
	if err := startTestWorkerIfNeeded(tid, state, target); err != nil {
		state.LogBuildError(tid, label, core.TargetTestFailed, fmt.Errorf("failed to start test worker: %w", err), "Failed to start test worker")
		return
	}
	target.StartTestSuite()

	coverage := &core.TestCoverage{}
	if state.NumTestRuns == 1 {
		// Single requested run: retry flaky tests up to target.Flakiness times.
		var results core.TestSuite
		results, coverage = doFlakeRun(tid, state, target, runRemotely)
		target.AddTestResults(results)

		if target.Results.TestCases.AllSucceeded() {
			// Success, store in cache
			moveAndCacheOutputFiles(&target.Results, coverage)
		}
	} else if state.TestSequentially {
		// Multiple runs executed one after another; note that `run` is shadowed here.
		for run := 1; run <= state.NumTestRuns; run++ {
			state.LogBuildResult(tid, target.Label, core.TargetTesting, getRunStatus(run, state.NumTestRuns))
			var results core.TestSuite
			results, coverage = doTest(tid, state, target, runRemotely, 1) // Sequential tests re-use run 1's test dir
			target.AddTestResults(results)
		}
	} else {
		// One of several parallel runs; this invocation only performs run `run`.
		state.LogBuildResult(tid, target.Label, core.TargetTesting, getRunStatus(run, state.NumTestRuns))
		var results core.TestSuite
		results, coverage = doTest(tid, state, target, runRemotely, run)
		target.AddTestResults(results)
	}
	logTargetResults(tid, state, target, coverage, run)
}
// doFlakeRun executes a test, retrying up to target.Flakiness times until one
// attempt fully succeeds. All attempts are accumulated into a single suite so a
// test that flaked several times shows every execution, and coverage from each
// attempt is aggregated. Returns the combined suite and coverage.
func doFlakeRun(tid int, state *core.BuildState, target *core.BuildTarget, runRemotely bool) (core.TestSuite, *core.TestCoverage) {
	var aggregated core.TestSuite
	totalCoverage := &core.TestCoverage{}

	for attempt := 1; attempt <= target.Flakiness; attempt++ {
		state.LogBuildResult(tid, target.Label, core.TargetTesting, getFlakeStatus(attempt, target.Flakiness))

		suite, cov := doTest(tid, state, target, runRemotely, 1) // Flaky retries always use run 1's directory.

		// Fold this attempt into the aggregate; all executions become part of one group.
		aggregated.TimedOut = aggregated.TimedOut || suite.TimedOut
		aggregated.Properties = suite.Properties
		aggregated.Duration += suite.Duration
		aggregated.Add(suite.TestCases...)
		totalCoverage.Aggregate(cov)

		// A fully successful attempt ends the retry loop.
		if suite.TestCases.AllSucceeded() {
			aggregated.Cached = suite.Cached
			break
		}
	}
	return aggregated, totalCoverage
}
// getFlakeStatus formats the progress message for flaky-retry attempt `flake` of
// `flakes`; the counter is omitted when only a single attempt is allowed.
func getFlakeStatus(flake int, flakes int) string {
	if flakes != 1 {
		return fmt.Sprintf("Testing (flake %d of %d)...", flake, flakes)
	}
	return "Testing..."
}
// getRunStatus formats the progress message for run `run` of `numRuns`; the
// counter is omitted when only a single run was requested.
func getRunStatus(run int, numRuns int) string {
	if numRuns != 1 {
		return fmt.Sprintf("Testing (run %d of %d)...", run, numRuns)
	}
	return "Testing..."
}
// logTargetResults reports the final outcome of a target's test runs: on full
// success it cleans up the test directory (if configured) and logs success;
// otherwise it surfaces a failure/error message from the test cases.
func logTargetResults(tid int, state *core.BuildState, target *core.BuildTarget, coverage *core.TestCoverage, run int) {
	if target.Results.TestCases.AllSucceeded() {
		// Clean up the test directory.
		if state.CleanWorkdirs {
			if err := os.RemoveAll(target.TestDir(run)); err != nil {
				log.Warning("Failed to remove test directory for %s: %s", target.Label, err)
			}
		}
		logTestSuccess(state, tid, target.Label, &target.Results, coverage)
		return
	}
	var resultErr error
	var resultMsg string
	if target.Results.Failures() > 0 {
		resultMsg = "Tests failed"
		for _, testCase := range target.Results.TestCases {
			if len(testCase.Failures()) > 0 {
				// errors.New, not fmt.Errorf: the message is arbitrary test output and must
				// not be interpreted as a printf format string (any % verbs in it would be
				// mangled, and go vet flags non-constant format strings).
				resultErr = errors.New(testCase.Failures()[0].Failure.Message)
			}
		}
	} else if target.Results.Errors() > 0 {
		resultMsg = "Tests errored"
		for _, testCase := range target.Results.TestCases {
			if len(testCase.Errors()) > 0 {
				resultErr = errors.New(testCase.Errors()[0].Error.Message)
			}
		}
	} else {
		resultErr = errors.New("unknown error")
		resultMsg = "Something went wrong"
	}
	state.LogTestResult(tid, target.Label, core.TargetTestFailed, &target.Results, coverage, resultErr, resultMsg)
}
// logTestSuccess logs a successful test result with a human-readable summary
// of how many tests passed and (if any) were skipped.
func logTestSuccess(state *core.BuildState, tid int, label core.BuildLabel, results *core.TestSuite, coverage *core.TestCoverage) {
	// Use a single count for both the number shown and its pluralisation;
	// previously the no-skips branch printed len(results.TestCases), which can
	// disagree with results.Tests() and produce messages like "3 test passed.".
	numTests := results.Tests()
	tests := pluralise("test", numTests)
	var description string
	if results.Skips() != 0 {
		description = fmt.Sprintf("%d %s passed. %d skipped",
			numTests, tests, results.Skips())
	} else {
		description = fmt.Sprintf("%d %s passed.", numTests, tests)
	}
	state.LogTestResult(tid, label, core.TargetTested, results, coverage, nil, description)
}
// pluralise naively pluralises word by appending an "s" unless quantity is
// exactly one.
func pluralise(word string, quantity int) string {
	if quantity != 1 {
		word += "s"
	}
	return word
}
// prepareTestDir recreates the test working directory for the given run and
// materialises all of the target's runtime files into it.
func prepareTestDir(graph *core.BuildGraph, target *core.BuildTarget, run int) error {
	dir := target.TestDir(run)
	// Start from a clean slate: remove any leftovers from a previous run.
	if err := os.RemoveAll(dir); err != nil {
		return err
	}
	if err := os.MkdirAll(dir, core.DirPermissions); err != nil {
		return err
	}
	for pair := range core.IterRuntimeFiles(graph, target, true, run) {
		if err := core.PrepareSourcePair(pair); err != nil {
			return err
		}
	}
	return nil
}
// testCommandAndEnv returns the test command & environment for a target.
// Any user-supplied test arguments are appended to the command line and also
// exposed to the test via the TESTS environment variable.
func testCommandAndEnv(state *core.BuildState, target *core.BuildTarget, run int) (string, []string, error) {
	cmd, err := core.ReplaceTestSequences(state, target, target.GetTestCommand(state))
	env := core.TestEnvironment(state, target, path.Join(core.RepoRoot, target.TestDir(run)))
	if testArgs := state.TestArgs; len(testArgs) > 0 {
		joined := strings.Join(testArgs, " ")
		cmd = cmd + " " + joined
		env = append(env, "TESTS="+joined)
	}
	return cmd, env, err
}
// runTest executes a single run of a test in its test directory, returning
// the stderr produced by the process.
func runTest(state *core.BuildState, target *core.BuildTarget, run int) ([]byte, error) {
	cmd, env, err := testCommandAndEnv(state, target, run)
	if err != nil {
		return nil, err
	}
	log.Debugf("Running test %s#%d\nENVIRONMENT:\n%s\n%s", target.Label, run, strings.Join(env, "\n"), cmd)
	_, stderr, err := state.ProcessExecutor.ExecWithTimeoutShellStdStreams(target, target.TestDir(run), env, target.TestTimeout, state.ShowAllOutput, cmd, target.TestSandbox, state.DebugTests)
	return stderr, err
}
// doTest performs one run of a target's test and packages the outcome, along
// with any coverage gathered, into a TestSuite.
func doTest(tid int, state *core.BuildState, target *core.BuildTarget, runRemotely bool, run int) (core.TestSuite, *core.TestCoverage) {
	start := time.Now()
	metadata, resultsData, coverage, err := doTestResults(tid, state, target, runRemotely, run)
	elapsed := time.Since(start)
	parsed := parseTestOutput(string(metadata.Stdout), string(metadata.Stderr), err, elapsed, target, resultsData)
	suite := core.TestSuite{
		Package:    strings.Replace(target.Label.PackageName, "/", ".", -1),
		Name:       target.Label.Name,
		Duration:   elapsed,
		// NOTE(review): a direct comparison misses wrapped deadline errors;
		// errors.Is(err, context.DeadlineExceeded) would be more robust if the
		// errors package were imported in this file — confirm and follow up.
		TimedOut:   err == context.DeadlineExceeded,
		Properties: parsed.Properties,
		TestCases:  parsed.TestCases,
		Cached:     metadata.Cached,
	}
	return suite, coverage
}
// doTestResults executes a single test run — remotely or locally — and
// gathers its build metadata, the raw contents of any results files, and
// parsed coverage. The returned error is the execution error, if any;
// results and coverage are still collected best-effort when the run fails.
func doTestResults(tid int, state *core.BuildState, target *core.BuildTarget, runRemotely bool, run int) (*core.BuildMetadata, [][]byte, *core.TestCoverage, error) {
	var err error
	var metadata *core.BuildMetadata
	if runRemotely {
		// NOTE(review): metadata may be nil here if the remote call fails, but
		// callers (e.g. doTest) dereference it unconditionally — verify that
		// RemoteClient.Test always returns a non-nil metadata.
		metadata, err = state.RemoteClient.Test(tid, target, run)
	} else {
		var stdout []byte
		stdout, err = prepareAndRunTest(tid, state, target, run)
		metadata = &core.BuildMetadata{Stdout: stdout}
	}
	coverage := parseCoverageFile(target, path.Join(target.TestDir(run), core.CoverageFile), run)
	var data [][]byte
	// If this test is meant to produce a results file, try to read it. This is
	// attempted regardless of whether the run itself succeeded, since a failed
	// run may still have written (partial) results.
	if !target.NoTestOutput {
		d, readErr := readTestResultsDir(path.Join(target.TestDir(run), core.TestResultsFile))
		if readErr != nil {
			log.Warningf("failed to read test results file: %v", readErr)
		} else {
			data = d
		}
	}
	return metadata, data, coverage, err
}
// prepareAndRunTest sets up a test directory and runs the test.
// If directory preparation fails, the error is logged against the target and
// an empty stdout is returned alongside it.
func prepareAndRunTest(tid int, state *core.BuildState, target *core.BuildTarget, run int) ([]byte, error) {
	if err := prepareTestDir(state.Graph, target, run); err != nil {
		state.LogBuildError(tid, target.Label, core.TargetTestFailed, err, "Failed to prepare test directory for %s: %s", target.Label, err)
		return []byte{}, err
	}
	return runTest(state, target, run)
}
// parseTestOutput reconciles the exit status of a test process with the
// results files it produced (if any), synthesising error test cases wherever
// the two sources of truth disagree.
func parseTestOutput(stdout string, stderr string, runError error, duration time.Duration, target *core.BuildTarget, resultsData [][]byte) core.TestSuite {
	// This is all pretty involved; there are lots of different possibilities of what could happen.
	// The contract is that the test must return zero on success or non-zero on failure (Unix FTW).
	// If it's successful, it must produce a parseable file named "test.results" in its temp folder.
	// (alternatively, this can be a directory containing parseable files).
	// Tests can opt out of the file requirement individually, in which case they're judged only
	// by their return value.
	// But of course, we still have to consider all the alternatives here and handle them nicely.
	// No output and no execution error and output not expected - OK
	// No output and no execution error and output expected - SYNTHETIC ERROR - Missing Results
	// No output and execution error - SYNTHETIC ERROR - Failed to Run
	// Output fails to parse - SYNTHETIC ERROR - Failed to parse output
	// Output fails to parse with execution error - SYNTHETIC ERROR + EXECUTION ERROR - Failed to parse output
	// Output and no execution error - PARSE OUTPUT - Ignore noTestOutput
	// Output and execution error - PARSE OUTPUT + SYNTHETIC ERROR - Incomplete Run

	// failSuite builds a single-case suite representing one synthetic failure,
	// capturing the process's stdout/stderr for diagnosis.
	failSuite := func(msg, resultType, traceback string) core.TestSuite {
		return core.TestSuite{
			TestCases: []core.TestCase{
				{
					Name: target.Results.Name,
					Executions: []core.TestExecution{
						{
							Duration: &duration,
							Stdout:   stdout,
							Stderr:   stderr,
							Error: &core.TestResultFailure{
								Message:   msg,
								Type:      resultType,
								Traceback: traceback,
							},
						},
					},
				},
			},
		}
	}
	if len(resultsData) == 0 {
		if runError == nil {
			// No output and no execution error and output not expected - OK
			if target.NoTestOutput {
				return core.TestSuite{
					TestCases: []core.TestCase{
						{
							// Need a name so that multiple runs get collated correctly.
							Name: target.Results.Name,
							Executions: []core.TestExecution{
								{
									Duration: &duration,
									Stdout:   stdout,
									Stderr:   stderr,
								},
							},
						},
					},
				}
			}
			// No output and no execution error and output expected - SYNTHETIC ERROR - Missing Results
			return failSuite("Test failed to produce output results file", "MissingResults", "")
		}
		// No output and execution error - SYNTHETIC ERROR - Failed to Run
		return failSuite("Test failed", "TestFailed", runError.Error())
	}
	results, parseError := parseTestResults(resultsData)
	if parseError != nil {
		// Output fails to parse with execution error - SYNTHETIC ERROR + EXECUTION ERROR - Failed to parse output
		if runError != nil {
			return failSuite("Test failed with no results", "NoResults", runError.Error())
		}
		// Output fails to parse - SYNTHETIC ERROR - Failed to parse output
		return failSuite("Couldn't parse test output file", "NoResults", parseError.Error())
	}
	// Output and no execution error - PARSE OUTPUT - Ignore noTestOutput
	if runError != nil && results.Failures() == 0 {
		// Add a failure result to the test so it shows up in the final aggregation.
		results.Add(failSuite("Test returned nonzero but reported no errors", "ReturnValue", runError.Error()).TestCases...)
	} else if runError == nil && results.Failures() != 0 {
		// Converse mismatch: the process claimed success but the results disagree.
		results.Add(failSuite("Test returned 0 but still reported failures", "ReturnValue", "").TestCases...)
	}
	return results
}
// parseCoverageFile parses the coverage output for a single target.
// Parse failures are logged but deliberately not propagated; an (empty)
// coverage object is returned regardless.
func parseCoverageFile(target *core.BuildTarget, coverageFile string, run int) *core.TestCoverage {
	cov, err := parseTestCoverageFile(target, coverageFile, run)
	if err != nil {
		log.Errorf("Failed to parse coverage file for %s: %s", target.Label, err)
	}
	return cov
}
// RemoveTestOutputs removes any cached test or coverage result files for a target.
func RemoveTestOutputs(target *core.BuildTarget) error {
	// Gather everything to delete, then remove in one pass, stopping on the
	// first failure (same order as before: results, coverage, then outputs).
	paths := []string{target.TestResultsFile(), target.CoverageFile()}
	for _, output := range target.TestOutputs {
		paths = append(paths, path.Join(target.OutDir(), output))
	}
	for _, p := range paths {
		if err := os.RemoveAll(p); err != nil {
			return err
		}
	}
	return nil
}
// moveOutputFile moves an output file from the temporary directory to its
// permanent location. If dummy is given, it writes that into the destination
// if the source file doesn't exist; with no dummy a missing source is a no-op.
func moveOutputFile(state *core.BuildState, hash []byte, from, to, dummy string) error {
	if err := fs.EnsureDir(to); err != nil {
		return err
	}
	switch {
	case core.PathExists(from):
		if err := os.Rename(from, to); err != nil {
			return err
		}
	case dummy == "":
		// Nothing to move and no placeholder requested.
		return nil
	default:
		if err := ioutil.WriteFile(to, []byte(dummy), 0644); err != nil {
			return err
		}
	}
	// Set the hash on the new destination file.
	return fs.RecordAttr(to, hash, xattrName, state.XattrsSupported)
}
// startTestWorkerIfNeeded starts a worker server if the test needs one.
// Targets without a worker command are a no-op; on success the target's test
// command may be replaced by one supplied in the worker's response.
func startTestWorkerIfNeeded(tid int, state *core.BuildState, target *core.BuildTarget) error {
	workerCmd, _, testCmd, err := core.TestWorkerCommand(state, target)
	if err != nil || workerCmd == "" {
		return err // Either a genuine error, or this test needs no worker.
	}
	state.LogBuildResult(tid, target.Label, core.TargetTesting, "Starting test worker...")
	resp, err := worker.EnsureWorkerStarted(state, workerCmd, testCmd, target)
	if err != nil {
		return err
	}
	state.LogBuildResult(tid, target.Label, core.TargetTesting, "Testing...")
	if resp.Command != "" {
		log.Debug("Setting test command for %s to %s", target.Label, resp.Command)
		target.TestCommand = resp.Command
	}
	return nil
}
// verifyHash verifies that the hash stored as an xattr on a test file matches
// the one for the current test.
func verifyHash(state *core.BuildState, filename string, hash []byte) bool {
	stored := fs.ReadAttr(filename, xattrName, state.XattrsSupported)
	return bytes.Equal(hash, stored)
}
// runtimeHash returns the runtime hash of a target, or an empty slice if running remotely.
func runtimeHash(state *core.BuildState, target *core.BuildTarget, runRemotely bool, run int) ([]byte, error) {
	if runRemotely {
		return nil, nil
	}
	h, err := build.RuntimeHash(state, target, run)
	if err != nil {
		return h, err
	}
	return core.CollapseHash(h), nil
}
| 1 | 9,455 | Should we just return a non-nil metadata from `Test`? | thought-machine-please | go |
@@ -15,6 +15,10 @@ class MailPreview < MailView
CommunicartMailer.comment_added_email(comment, email)
end
+ def whsc_email
+ CommunicartMailer.cart_notification_email(email, pending_approval)
+ end
+
private
| 1 | class MailPreview < MailView
def cart_notification_email
CommunicartMailer.cart_notification_email(email, pending_approval)
end
def cart_observer_email
CommunicartMailer.cart_observer_email(email, cart)
end
def approval_reply_received_email
CommunicartMailer.approval_reply_received_email(received_approval)
end
def comment_added_email
CommunicartMailer.comment_added_email(comment, email)
end
private
def email
'[email protected]'
end
def pending_approval
Approval.pending.last
end
def received_approval
Approval.received.last
end
def cart
Cart.last
end
def comment
Comment.where(commentable_type: 'Cart').last
end
end
| 1 | 12,229 | Noticing this is really just the same as cart_notification_email. Would be nice to be able to view custom templates. I may update this with some set up that will make viewing specific custom emails easily. | 18F-C2 | rb |
@@ -59,13 +59,8 @@ import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.READ_ONLY;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CollectionAdminParams.ALIAS;
-import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
-import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
-import static org.apache.solr.common.params.CollectionAdminParams.CREATE_NODE_SET_PARAM;
-import static org.apache.solr.common.params.CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM;
-import static org.apache.solr.common.params.CollectionAdminParams.ROUTER_PREFIX;
-import static org.apache.solr.common.params.CollectionAdminParams.SKIP_NODE_ASSIGNMENT;
+import static org.apache.solr.common.params.CollectionAdminParams.*;
+import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH;
/**
* This class is experimental and subject to change. | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.request;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.TimeZone;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.solr.client.solrj.RoutedAliasTypes;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.V2RequestSupport;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
import org.apache.solr.common.MapWriter;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ImplicitDocRouter;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.CollectionParams.CollectionAction;
import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.READ_ONLY;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
import static org.apache.solr.common.params.CollectionAdminParams.ALIAS;
import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
import static org.apache.solr.common.params.CollectionAdminParams.CREATE_NODE_SET_PARAM;
import static org.apache.solr.common.params.CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM;
import static org.apache.solr.common.params.CollectionAdminParams.ROUTER_PREFIX;
import static org.apache.solr.common.params.CollectionAdminParams.SKIP_NODE_ASSIGNMENT;
/**
* This class is experimental and subject to change.
*
* @since solr 4.5
*/
public abstract class CollectionAdminRequest<T extends CollectionAdminResponse> extends SolrRequest<T> implements V2RequestSupport, MapWriter {
/**
* The set of modifiable collection properties
*/
public static final java.util.List<String> MODIFIABLE_COLLECTION_PROPERTIES = Arrays.asList(
REPLICATION_FACTOR,
COLL_CONF,
READ_ONLY);
protected final CollectionAction action;
public static String PROPERTY_PREFIX = "property.";
public CollectionAdminRequest(CollectionAction action) {
this("/admin/collections", action);
}
public CollectionAdminRequest(String path, CollectionAction action) {
super(METHOD.GET, path);
this.action = checkNotNull(CoreAdminParams.ACTION, action);
}
@Override
@SuppressWarnings({"rawtypes"})
public SolrRequest getV2Request() {
return usev2 ?
V1toV2ApiMapper.convert(this).useBinary(useBinaryV2).build() :
this;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(CoreAdminParams.ACTION, action.toString());
return params;
}
protected void addProperties(ModifiableSolrParams params, Properties props) {
for (String propertyName : props.stringPropertyNames()) {
params.set(PROPERTY_PREFIX + propertyName, props.getProperty(propertyName));
}
}
@Override
public void writeMap(EntryWriter ew) throws IOException {
ew.put("class", this.getClass().getName());
ew.put("method", getMethod().toString());
SolrParams params = getParams();
if (params != null) {
for (Iterator<String> it = params.getParameterNamesIterator(); it.hasNext(); ) {
final String name = it.next();
final String [] values = params.getParams(name);
for (String value : values) {
ew.put("params." + name, value);
}
}
}
}
@Override
public String toString() {
return jsonStr();
}
@Override
public String getRequestType() {
return SolrRequestType.ADMIN.toString();
}
/**
* Base class for asynchronous collection admin requests
*/
public abstract static class AsyncCollectionAdminRequest extends CollectionAdminRequest<CollectionAdminResponse> {
protected String asyncId = null;
protected boolean waitForFinalState = false;
public AsyncCollectionAdminRequest(CollectionAction action) {
super(action);
}
@Override
protected CollectionAdminResponse createResponse(SolrClient client) {
return new CollectionAdminResponse();
}
private static String generateAsyncId() {
return UUID.randomUUID().toString();
}
public String getAsyncId() {
return asyncId;
}
public void setWaitForFinalState(boolean waitForFinalState) {
this.waitForFinalState = waitForFinalState;
}
public void setAsyncId(String asyncId) {
this.asyncId = asyncId;
}
/**
* Process this request asynchronously, generating and returning a request id
* @param client a Solr client
* @return the request id
* @see CollectionAdminRequest.RequestStatus
*/
public String processAsync(SolrClient client) throws IOException, SolrServerException {
return processAsync(generateAsyncId(), client);
}
/**
* Process this request asynchronously, using a specified request id
* @param asyncId the request id
* @param client a Solr client
* @return the request id
*/
public String processAsync(String asyncId, SolrClient client) throws IOException, SolrServerException {
this.asyncId = asyncId;
NamedList<Object> resp = client.request(this);
if (resp.get("error") != null) {
throw new SolrServerException((String)resp.get("error"));
}
return (String) resp.get("requestid");
}
/**
* Send this request to a Solr server, and wait (up to a timeout) for the request to
* complete or fail
* @param client a Solr client
* @param timeoutSeconds the maximum time to wait
* @return the status of the request on completion or timeout
*/
public RequestStatusState processAndWait(SolrClient client, long timeoutSeconds)
throws SolrServerException, InterruptedException, IOException {
return processAndWait(generateAsyncId(), client, timeoutSeconds);
}
/**
* Send this request to a Solr server, and wait (up to a timeout) for the request to
* complete or fail
* @param asyncId an id for the request
* @param client a Solr client
* @param timeoutSeconds the maximum time to wait
* @return the status of the request on completion or timeout
*/
public RequestStatusState processAndWait(String asyncId, SolrClient client, long timeoutSeconds)
throws IOException, SolrServerException, InterruptedException {
processAsync(asyncId, client);
return requestStatus(asyncId).waitFor(client, timeoutSeconds);
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
if (asyncId != null) {
params.set(CommonAdminParams.ASYNC, asyncId);
}
if (waitForFinalState) {
params.set(CommonAdminParams.WAIT_FOR_FINAL_STATE, waitForFinalState);
}
return params;
}
}
protected abstract static class AsyncCollectionSpecificAdminRequest extends AsyncCollectionAdminRequest {
protected String collection;
protected Boolean followAliases;
public AsyncCollectionSpecificAdminRequest(CollectionAction action, String collection) {
super(action);
this.collection = checkNotNull(CoreAdminParams.COLLECTION, collection);
}
public String getCollectionName() {
return collection;
}
public void setFollowAliases(Boolean followAliases) {
this.followAliases = followAliases;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
params.set(CoreAdminParams.NAME, collection);
params.setNonNull(CollectionAdminParams.FOLLOW_ALIASES, followAliases);
return params;
}
}
protected abstract static class AsyncShardSpecificAdminRequest extends AsyncCollectionAdminRequest {
protected String collection;
protected String shard;
public AsyncShardSpecificAdminRequest(CollectionAction action, String collection, String shard) {
super(action);
this.collection = checkNotNull(CoreAdminParams.COLLECTION, collection);
this.shard = checkNotNull(CoreAdminParams.SHARD, shard);
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
params.set(CoreAdminParams.COLLECTION, collection);
params.set(CoreAdminParams.SHARD, shard);
return params;
}
}
@SuppressWarnings({"rawtypes"})
protected abstract static class ShardSpecificAdminRequest extends CollectionAdminRequest {
protected String collection;
protected String shard;
public ShardSpecificAdminRequest(CollectionAction action, String collection, String shard) {
super(action);
this.collection = checkNotNull(CoreAdminParams.COLLECTION, collection);
this.shard = checkNotNull(CoreAdminParams.SHARD, shard);
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
params.set(CoreAdminParams.COLLECTION, collection);
params.set(CoreAdminParams.SHARD, shard);
return params;
}
@Override
protected SolrResponse createResponse(SolrClient client) {
return new CollectionAdminResponse();
}
}
//---------------------------------------------------------------------------------------
//
//---------------------------------------------------------------------------------------
protected abstract static class CollectionAdminRoleRequest extends AsyncCollectionAdminRequest {
protected String node;
protected String role;
public CollectionAdminRoleRequest(CollectionAction action, String node, String role) {
super(action);
this.role = checkNotNull(CollectionAdminParams.ROLE, role);
this.node = checkNotNull(CoreAdminParams.NODE, node);
}
public String getNode() {
return this.node;
}
public String getRole() {
return this.role;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
params.set(CollectionAdminParams.ROLE, this.role);
params.set(CoreAdminParams.NODE, this.node);
return params;
}
}
/** Specific Collection API call implementations **/
/**
* Returns a SolrRequest for creating a collection
* @param collection the collection name
* @param config the collection config
* @param numShards the number of shards in the collection
* @param numNrtReplicas the number of {@link org.apache.solr.common.cloud.Replica.Type#NRT} replicas
* @param numTlogReplicas the number of {@link org.apache.solr.common.cloud.Replica.Type#TLOG} replicas
* @param numPullReplicas the number of {@link org.apache.solr.common.cloud.Replica.Type#PULL} replicas
*/
public static Create createCollection(String collection, String config, Integer numShards, Integer numNrtReplicas, Integer numTlogReplicas, Integer numPullReplicas) {
return new Create(collection, config, numShards, numNrtReplicas, numTlogReplicas, numPullReplicas);
}
/**
* Returns a SolrRequest for creating a collection
* @param collection the collection name
* @param config the collection config
* @param numShards the number of shards in the collection
* @param numReplicas the replication factor of the collection (same as numNrtReplicas)
*/
public static Create createCollection(String collection, String config, int numShards, int numReplicas) {
return new Create(collection, config, numShards, numReplicas, null, null);
}
/**
* Returns a SolrRequest for creating a collection using a default configSet
*
* This requires that there is either a single configset configured in the cluster, or
* that there is a configset with the same name as the collection
*
* @param collection the collection name
* @param numShards the number of shards in the collection
* @param numReplicas the replication factor of the collection
*/
public static Create createCollection(String collection, int numShards, int numReplicas) {
return new Create(collection, null, numShards, numReplicas, 0, 0);
}
/**
* Returns a SolrRequest for creating a collection with the implicit router
* @param collection the collection name
* @param config the collection config
* @param shards a shard definition string
* @param numReplicas the replication factor of the collection
*/
public static Create createCollectionWithImplicitRouter(String collection, String config, String shards, int numReplicas) {
return new Create(collection, config, shards, numReplicas);
}
/**
* Returns a SolrRequest for creating a collection with the implicit router and specific types of replicas
* @param collection the collection name
* @param config the collection config
* @param shards a shard definition string
* @param numNrtReplicas the number of replicas of type {@link org.apache.solr.common.cloud.Replica.Type#NRT}
* @param numTlogReplicas the number of replicas of type {@link org.apache.solr.common.cloud.Replica.Type#TLOG}
* @param numPullReplicas the number of replicas of type {@link org.apache.solr.common.cloud.Replica.Type#PULL}
*/
public static Create createCollectionWithImplicitRouter(String collection, String config, String shards, int numNrtReplicas, int numTlogReplicas, int numPullReplicas) {
return new Create(collection, config, ImplicitDocRouter.NAME, null, checkNotNull("shards",shards), numNrtReplicas, numTlogReplicas, numPullReplicas);
}
/**
* Returns a SolrRequest for modifying a collection with the given properties
* @param collection the collection name
* @param properties a map of key and values with which the collection is to be modified
*/
public static Modify modifyCollection(String collection, Map<String, Object> properties) {
return new Modify(collection, properties);
}
// CREATE request
public static class Create extends AsyncCollectionSpecificAdminRequest {
protected String configName = null;
protected String createNodeSet = null;
protected String routerName;
protected String policy;
protected String shards;
protected String routerField;
protected Integer numShards;
protected Integer nrtReplicas;
protected Integer pullReplicas;
protected Integer tlogReplicas;
protected Properties properties;
protected String alias;
protected String[] rule , snitch;
/** Constructor intended for typical use cases */
protected Create(String collection, String config, Integer numShards, Integer numNrtReplicas, Integer numTlogReplicas, Integer numPullReplicas) { // TODO: maybe add other constructors
this(collection, config, null, numShards, null, numNrtReplicas, numTlogReplicas, numPullReplicas);
}
/** Constructor that assumes {@link ImplicitDocRouter#NAME} and an explicit list of <code>shards</code> */
protected Create(String collection, String config, String shards, int numNrtReplicas) {
this(collection, config, ImplicitDocRouter.NAME, null, checkNotNull("shards",shards), numNrtReplicas, null, null);
}
private Create(String collection, String config, String routerName, Integer numShards, String shards, Integer numNrtReplicas, Integer numTlogReplicas, Integer numPullReplicas) {
super(CollectionAction.CREATE, SolrIdentifierValidator.validateCollectionName(collection));
// NOTE: there's very little we can assert about the args because nothing but "collection" is required by the server
if ((null != shards) && (null != numShards)) {
throw new IllegalArgumentException("Can not specify both a numShards and a list of shards");
}
this.configName = config;
this.routerName = routerName;
this.numShards = numShards;
this.setShards(shards);
this.nrtReplicas = numNrtReplicas;
this.tlogReplicas = numTlogReplicas;
this.pullReplicas = numPullReplicas;
}
public Create setCreateNodeSet(String nodeSet) { this.createNodeSet = nodeSet; return this; }
public Create setRouterName(String routerName) { this.routerName = routerName; return this; }
public Create setRouterField(String routerField) { this.routerField = routerField; return this; }
public Create setNrtReplicas(Integer nrtReplicas) { this.nrtReplicas = nrtReplicas; return this;}
public Create setTlogReplicas(Integer tlogReplicas) { this.tlogReplicas = tlogReplicas; return this;}
public Create setPullReplicas(Integer pullReplicas) { this.pullReplicas = pullReplicas; return this;}
public Create setReplicationFactor(Integer repl) { this.nrtReplicas = repl; return this; }
public Create setRule(String... s){ this.rule = s; return this; }
public Create setSnitch(String... s){ this.snitch = s; return this; }
public Create setAlias(String alias) {
this.alias = alias;
return this;
}
public String getConfigName() { return configName; }
public String getCreateNodeSet() { return createNodeSet; }
public String getRouterName() { return routerName; }
public String getShards() { return shards; }
public Integer getNumShards() { return numShards; }
public Integer getReplicationFactor() { return getNumNrtReplicas(); }
public Integer getNumNrtReplicas() { return nrtReplicas; }
public Integer getNumTlogReplicas() {return tlogReplicas;}
public Integer getNumPullReplicas() {return pullReplicas;}
/**
* Provide the name of the shards to be created, separated by commas
*
* Shard names must consist entirely of periods, underscores, hyphens, and alphanumerics. Other characters are not allowed.
*
* @throws IllegalArgumentException if any of the shard names contain invalid characters.
*/
public Create setShards(String shards) {
if (null != shards) {
for (String shard : shards.split(",")) {
SolrIdentifierValidator.validateShardName(shard);
}
}
this.shards = shards;
return this;
}
public Properties getProperties() {
return properties;
}
public Create setProperties(Properties properties) {
this.properties = properties;
return this;
}
public Create setProperties(Map<String, String> properties) {
this.properties = new Properties();
this.properties.putAll(properties);
return this;
}
public Create withProperty(String key, String value) {
if (this.properties == null)
this.properties = new Properties();
this.properties.setProperty(key, value);
return this;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
if (configName != null)
params.set("collection.configName", configName);
if (createNodeSet != null)
params.set(CREATE_NODE_SET_PARAM, createNodeSet);
if (numShards != null) {
params.set( ZkStateReader.NUM_SHARDS_PROP, numShards);
}
if (routerName != null)
params.set( "router.name", routerName);
if (shards != null)
params.set("shards", shards);
if (routerField != null) {
params.set("router.field", routerField);
}
if (nrtReplicas != null) {
params.set( ZkStateReader.NRT_REPLICAS, nrtReplicas);
}
if (properties != null) {
addProperties(params, properties);
}
if (pullReplicas != null) {
params.set(ZkStateReader.PULL_REPLICAS, pullReplicas);
}
if (tlogReplicas != null) {
params.set(ZkStateReader.TLOG_REPLICAS, tlogReplicas);
}
params.setNonNull(ALIAS, alias);
return params;
}
    /**
     * Sets the policy name for this collection (builder-style; returns this).
     * NOTE(review): the getParams() implementation above does not serialize this
     * field — confirm where the "policy" value is actually transmitted.
     */
    public Create setPolicy(String policy) {
      this.policy = policy;
      return this;
    }
}
  /**
   * Returns a SolrRequest to reload a collection
   *
   * @param collection the name of the collection to reload
   */
  public static Reload reloadCollection(String collection) {
    return new Reload(collection);
  }
  // RELOAD request: carries no parameters beyond the collection name itself.
  public static class Reload extends AsyncCollectionSpecificAdminRequest {
    private Reload(String collection) {
      super(CollectionAction.RELOAD, collection);
    }
  }
  /**
   * Returns a SolrRequest to rename a collection.
   *
   * @param collection the current collection name
   * @param target the new name for the collection
   */
  public static Rename renameCollection(String collection, String target) {
    return new Rename(collection, target);
  }
  // RENAME request
  public static class Rename extends AsyncCollectionSpecificAdminRequest {
    String target; // the new collection name
    public Rename(String collection, String target) {
      super(CollectionAction.RENAME, collection);
      this.target = target;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CollectionAdminParams.TARGET, target);
      return params;
    }
  }
  /**
   * Returns a SolrRequest to delete a node.
   *
   * @param node the node to be deleted
   */
  public static DeleteNode deleteNode(String node) {
    return new DeleteNode(node);
  }
  // DELETENODE request
  public static class DeleteNode extends AsyncCollectionAdminRequest {
    String node;
    /**
     * @param node The node to be deleted
     */
    public DeleteNode(String node) {
      super(CollectionAction.DELETENODE);
      this.node = checkNotNull("node",node);
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CoreAdminParams.NODE, node);
      return params;
    }
  }
  /**
   * REPLACENODE request: moves the replicas off the source node, recreating
   * them elsewhere.
   */
  public static class ReplaceNode extends AsyncCollectionAdminRequest {
    String sourceNode, targetNode;
    Boolean parallel; // whether replicas may be moved in parallel; omitted from params when null
    /**
     * @param source node to be cleaned up
     * @param target node where the new replicas are to be created
     *        (not null-checked here, unlike source — presumably optional; confirm server-side handling)
     */
    public ReplaceNode(String source, String target) {
      super(CollectionAction.REPLACENODE);
      this.sourceNode = checkNotNull(CollectionParams.SOURCE_NODE, source);
      this.targetNode = target;
    }
    /** Builder-style setter for the parallel flag. */
    public ReplaceNode setParallel(Boolean flag) {
      this.parallel = flag;
      return this;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CollectionParams.SOURCE_NODE, sourceNode);
      params.set(CollectionParams.TARGET_NODE, targetNode);
      if (parallel != null) params.set("parallel", parallel.toString());
      return params;
    }
  }
  /**
   * Returns a SolrRequest to move a named replica to a target node.
   *
   * @param collection the collection the replica belongs to
   * @param replica the name of the replica to move
   * @param targetNode the node to move the replica to
   */
  public static MoveReplica moveReplica(String collection, String replica, String targetNode) {
    return new MoveReplica(collection, replica, targetNode);
  }
  /**
   * MOVEREPLICA request. Supports two modes: moving one named replica to a
   * target node, or letting the server pick a replica from a given shard on a
   * given source node (the "randomlyMoveReplica" mode).
   */
  public static class MoveReplica extends AsyncCollectionAdminRequest {
    protected String collection, replica, targetNode;
    protected String shard, sourceNode;
    protected boolean randomlyMoveReplica; // true when constructed with shard+sourceNode instead of a replica name
    protected boolean inPlaceMove = true;
    protected int timeout = -1; // -1 means: do not send a timeout parameter
    /** Move the named replica to targetNode. */
    public MoveReplica(String collection, String replica, String targetNode) {
      super(CollectionAction.MOVEREPLICA);
      this.collection = checkNotNull(CoreAdminParams.COLLECTION, collection);
      this.replica = checkNotNull(CoreAdminParams.REPLICA, replica);
      this.targetNode = checkNotNull(CollectionParams.TARGET_NODE, targetNode);
      this.randomlyMoveReplica = false;
    }
    /** Move some replica of the given shard from sourceNode to targetNode. */
    public MoveReplica(String collection, String shard, String sourceNode, String targetNode) {
      super(CollectionAction.MOVEREPLICA);
      this.collection = checkNotNull(CoreAdminParams.COLLECTION, collection);
      this.shard = checkNotNull(CoreAdminParams.SHARD, shard);
      this.sourceNode = checkNotNull(CollectionParams.SOURCE_NODE, sourceNode);
      this.targetNode = checkNotNull(CollectionParams.TARGET_NODE, targetNode);
      this.randomlyMoveReplica = true;
    }
    public void setInPlaceMove(boolean inPlaceMove) {
      this.inPlaceMove = inPlaceMove;
    }
    public void setTimeout(int timeout) {
      this.timeout = timeout;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CoreAdminParams.COLLECTION, collection);
      params.set(CollectionParams.TARGET_NODE, targetNode);
      params.set(CommonAdminParams.IN_PLACE_MOVE, inPlaceMove);
      if (timeout != -1) {
        params.set(CommonAdminParams.TIMEOUT, timeout);
      }
      // Send mode-specific parameters depending on which constructor was used.
      if (randomlyMoveReplica) {
        params.set(CoreAdminParams.SHARD, shard);
        params.set(CollectionParams.SOURCE_NODE, sourceNode);
      } else {
        params.set(CoreAdminParams.REPLICA, replica);
      }
      return params;
    }
  }
  /**
   * Returns a RebalanceLeaders object to rebalance leaders for a collection
   *
   * @param collection the collection whose shard leaders should be rebalanced
   */
  public static RebalanceLeaders rebalanceLeaders(String collection) {
    return new RebalanceLeaders(collection);
  }
  // REBALANCELEADERS request
  public static class RebalanceLeaders extends AsyncCollectionAdminRequest {
    protected Integer maxAtOnce;      // optional: max leaders to rebalance at once
    protected Integer maxWaitSeconds; // optional: max seconds to wait
    protected String collection;
    public RebalanceLeaders setMaxAtOnce(Integer maxAtOnce) {
      this.maxAtOnce = maxAtOnce;
      return this;
    }
    public RebalanceLeaders setMaxWaitSeconds(Integer maxWaitSeconds) {
      this.maxWaitSeconds = maxWaitSeconds;
      return this;
    }
    public Integer getMaxAtOnce() {
      return maxAtOnce;
    }
    public Integer getMaxWaitSeconds() {
      return maxWaitSeconds;
    }
    public RebalanceLeaders(String collection) {
      super(CollectionAction.REBALANCELEADERS);
      this.collection = checkNotNull(CoreAdminParams.COLLECTION, collection);
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CoreAdminParams.COLLECTION, collection);
      if(this.maxWaitSeconds != null) {
        params.set("maxWaitSeconds", this.maxWaitSeconds);
      }
      if(this.maxAtOnce != null) {
        params.set("maxAtOnce", this.maxAtOnce);
      }
      return params;
    }
  }
  /**
   * Returns a SolrRequest to reindex a collection
   *
   * @param collection the collection to reindex
   */
  public static ReindexCollection reindexCollection(String collection) {
    return new ReindexCollection(collection);
  }
  /**
   * REINDEXCOLLECTION request. All options are optional; unset fields are
   * omitted from the request (see {@code setNonNull} usage in getParams).
   */
  public static class ReindexCollection extends AsyncCollectionSpecificAdminRequest {
    String target;
    String query;
    String fields;
    String configName;
    Boolean removeSource;
    String cmd;
    Integer batchSize;
    Map<String, Object> collectionParams = new HashMap<>();
    private ReindexCollection(String collection) {
      super(CollectionAction.REINDEXCOLLECTION, collection);
    }
    /** Target collection name (null if the same). */
    public ReindexCollection setTarget(String target) {
      this.target = target;
      return this;
    }
    /** Set optional command (eg. abort, status). */
    public ReindexCollection setCommand(String command) {
      this.cmd = command;
      return this;
    }
    /** Query matching the documents to reindex (default is '*:*'). */
    public ReindexCollection setQuery(String query) {
      this.query = query;
      return this;
    }
    /** Fields to reindex (the same syntax as {@link CommonParams#FL}), default is '*'. */
    public ReindexCollection setFields(String fields) {
      this.fields = fields;
      return this;
    }
    /** Remove source collection after success. Default is false. */
    public ReindexCollection setRemoveSource(boolean removeSource) {
      this.removeSource = removeSource;
      return this;
    }
    /** Copy documents in batches of this size. Default is 100. */
    public ReindexCollection setBatchSize(int batchSize) {
      this.batchSize = batchSize;
      return this;
    }
    /** Config name for the target collection. Default is the same as source. */
    public ReindexCollection setConfigName(String configName) {
      this.configName = configName;
      return this;
    }
    /** Set other supported collection CREATE parameters. */
    public ReindexCollection setCollectionParam(String key, Object value) {
      this.collectionParams.put(key, value);
      return this;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.setNonNull("target", target);
      params.setNonNull("cmd", cmd);
      params.setNonNull(ZkStateReader.CONFIGNAME_PROP, configName);
      params.setNonNull(CommonParams.Q, query);
      params.setNonNull(CommonParams.FL, fields);
      params.setNonNull("removeSource", removeSource);
      params.setNonNull(CommonParams.ROWS, batchSize);
      // pass through any extra collection CREATE parameters
      collectionParams.forEach((k, v) -> params.setNonNull(k, v));
      return params;
    }
  }
  /**
   * Return a SolrRequest for low-level detailed status of the specified collection.
   * @param collection the collection to get the status of.
   */
  public static ColStatus collectionStatus(String collection) {
    checkNotNull(CoreAdminParams.COLLECTION, collection);
    return new ColStatus(collection);
  }
  /**
   * Return a SolrRequest for low-level detailed status of all collections on the cluster.
   */
  public static ColStatus collectionStatuses() {
    return new ColStatus();
  }
  /**
   * COLSTATUS request. Every flag is optional (tri-state Boolean); only
   * explicitly set flags are sent, via {@code setNonNull}.
   */
  public static class ColStatus extends AsyncCollectionAdminRequest {
    protected String collection = null; // null means: status of all collections
    protected Boolean withSegments = null;
    protected Boolean withFieldInfo = null;
    protected Boolean withCoreInfo = null;
    protected Boolean withSizeInfo = null;
    protected Boolean withRawSizeInfo = null;
    protected Boolean withRawSizeSummary = null;
    protected Boolean withRawSizeDetails = null;
    protected Float rawSizeSamplingPercent = null;
    private ColStatus(String collection) {
      super(CollectionAction.COLSTATUS);
      this.collection = collection;
    }
    private ColStatus() {
      super(CollectionAction.COLSTATUS);
    }
    public ColStatus setWithSegments(boolean withSegments) {
      this.withSegments = withSegments;
      return this;
    }
    public ColStatus setWithFieldInfo(boolean withFieldInfo) {
      this.withFieldInfo = withFieldInfo;
      return this;
    }
    public ColStatus setWithCoreInfo(boolean withCoreInfo) {
      this.withCoreInfo = withCoreInfo;
      return this;
    }
    public ColStatus setWithSizeInfo(boolean withSizeInfo) {
      this.withSizeInfo = withSizeInfo;
      return this;
    }
    public ColStatus setWithRawSizeInfo(boolean withRawSizeInfo) {
      this.withRawSizeInfo = withRawSizeInfo;
      return this;
    }
    public ColStatus setWithRawSizeSummary(boolean withRawSizeSummary) {
      this.withRawSizeSummary = withRawSizeSummary;
      return this;
    }
    public ColStatus setWithRawSizeDetails(boolean withRawSizeDetails) {
      this.withRawSizeDetails = withRawSizeDetails;
      return this;
    }
    public ColStatus setRawSizeSamplingPercent(float rawSizeSamplingPercent) {
      this.rawSizeSamplingPercent = rawSizeSamplingPercent;
      return this;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams)super.getParams();
      params.setNonNull(CoreAdminParams.COLLECTION, collection);
      params.setNonNull("segments", withSegments);
      params.setNonNull("fieldInfo", withFieldInfo);
      params.setNonNull("coreInfo", withCoreInfo);
      params.setNonNull("sizeInfo", withSizeInfo);
      params.setNonNull("rawSizeInfo", withRawSizeInfo);
      params.setNonNull("rawSizeSummary", withRawSizeSummary);
      params.setNonNull("rawSizeDetails", withRawSizeDetails);
      params.setNonNull("rawSizeSamplingPercent", rawSizeSamplingPercent);
      return params;
    }
  }
  /**
   * Returns a SolrRequest to delete a collection
   *
   * @param collection the name of the collection to delete
   */
  public static Delete deleteCollection(String collection) {
    return new Delete(collection);
  }
  // DELETE request: no parameters beyond the collection name itself.
  public static class Delete extends AsyncCollectionSpecificAdminRequest {
    private Delete(String collection) {
      super(CollectionAction.DELETE, collection);
    }
  }
public static Backup backupCollection(String collection, String backupName) {
return new Backup(collection, backupName);
}
// BACKUP request
public static class Backup extends AsyncCollectionSpecificAdminRequest {
protected final String name;
protected Optional<String> repositoryName = Optional.empty();
protected String location;
protected Optional<String> commitName = Optional.empty();
protected Optional<String> indexBackupStrategy = Optional.empty();
public Backup(String collection, String name) {
super(CollectionAction.BACKUP, collection);
this.name = name;
this.repositoryName = Optional.empty();
}
public String getLocation() {
return location;
}
public Backup setLocation(String location) {
this.location = location;
return this;
}
public Optional<String> getRepositoryName() {
return repositoryName;
}
public Backup setRepositoryName(String repositoryName) {
this.repositoryName = Optional.ofNullable(repositoryName);
return this;
}
public Optional<String> getCommitName() {
return commitName;
}
public Backup setCommitName(String commitName) {
this.commitName = Optional.ofNullable(commitName);
return this;
}
public Optional<String> getIndexBackupStrategy() {
return indexBackupStrategy;
}
public Backup setIndexBackupStrategy(String indexBackupStrategy) {
this.indexBackupStrategy = Optional.ofNullable(indexBackupStrategy);
return this;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
params.set(CoreAdminParams.COLLECTION, collection);
params.set(CoreAdminParams.NAME, name);
params.set(CoreAdminParams.BACKUP_LOCATION, location); //note: optional
if (repositoryName.isPresent()) {
params.set(CoreAdminParams.BACKUP_REPOSITORY, repositoryName.get());
}
if (commitName.isPresent()) {
params.set(CoreAdminParams.COMMIT_NAME, commitName.get());
}
if (indexBackupStrategy.isPresent()) {
params.set(CollectionAdminParams.INDEX_BACKUP_STRATEGY, indexBackupStrategy.get());
}
return params;
}
}
public static Restore restoreCollection(String collection, String backupName) {
return new Restore(collection, backupName);
}
// RESTORE request
public static class Restore extends AsyncCollectionSpecificAdminRequest {
protected final String backupName;
protected Optional<String> repositoryName = Optional.empty();
protected String location;
// in common with collection creation:
protected String configName;
protected Integer replicationFactor;
protected Integer nrtReplicas;
protected Integer tlogReplicas;
protected Integer pullReplicas;
protected Optional<String> createNodeSet = Optional.empty();
protected Optional<Boolean> createNodeSetShuffle = Optional.empty();
protected Properties properties;
public Restore(String collection, String backupName) {
super(CollectionAction.RESTORE, collection);
this.backupName = backupName;
}
public String getLocation() {
return location;
}
public Restore setLocation(String location) {
this.location = location;
return this;
}
public Optional<String> getRepositoryName() {
return repositoryName;
}
public Restore setRepositoryName(String repositoryName) {
this.repositoryName = Optional.ofNullable(repositoryName);
return this;
}
public void setCreateNodeSet(String createNodeSet) {
this.createNodeSet = Optional.of(createNodeSet);
}
public Optional<String> getCreateNodeSet() {
return createNodeSet;
}
public Optional<Boolean> getCreateNodeSetShuffle() {
return createNodeSetShuffle;
}
public void setCreateNodeSetShuffle(boolean createNodeSetShuffle) {
this.createNodeSetShuffle = Optional.of(createNodeSetShuffle);
}
// Collection creation params in common:
public Restore setConfigName(String config) { this.configName = config; return this; }
public String getConfigName() { return configName; }
public Integer getReplicationFactor() { return replicationFactor; }
public Restore setReplicationFactor(Integer replicationFactor) { this.replicationFactor = replicationFactor; return this; }
public Integer getNrtReplicas() { return nrtReplicas; }
public Restore setNrtReplicas(Integer nrtReplicas) { this.nrtReplicas= nrtReplicas; return this; };
public Integer getTlogReplicas() { return tlogReplicas; }
public Restore setTlogReplicas(Integer tlogReplicas) { this.tlogReplicas = tlogReplicas; return this; }
public Integer getPullReplicas() { return pullReplicas; }
public Restore setPullReplicas(Integer pullReplicas) { this.pullReplicas = pullReplicas; return this; }
public Properties getProperties() {
return properties;
}
public Restore setProperties(Properties properties) { this.properties = properties; return this;}
// TODO support rule, snitch
@Override
public SolrParams getParams() {
ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
params.set(CoreAdminParams.COLLECTION, collection);
params.set(CoreAdminParams.NAME, backupName);
params.set(CoreAdminParams.BACKUP_LOCATION, location); //note: optional
params.set("collection.configName", configName); //note: optional
if (replicationFactor != null && nrtReplicas != null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Cannot set both replicationFactor and nrtReplicas as they mean the same thing");
}
if (replicationFactor != null) {
params.set(ZkStateReader.REPLICATION_FACTOR, replicationFactor);
}
if (nrtReplicas != null) {
params.set(ZkStateReader.NRT_REPLICAS, nrtReplicas);
}
if (pullReplicas != null) {
params.set(ZkStateReader.PULL_REPLICAS, pullReplicas);
}
if (tlogReplicas != null) {
params.set(ZkStateReader.TLOG_REPLICAS, tlogReplicas);
}
if (properties != null) {
addProperties(params, properties);
}
if (repositoryName.isPresent()) {
params.set(CoreAdminParams.BACKUP_REPOSITORY, repositoryName.get());
}
if (createNodeSet.isPresent()) {
params.set(CREATE_NODE_SET_PARAM, createNodeSet.get());
}
if (createNodeSetShuffle.isPresent()) {
params.set(CREATE_NODE_SET_SHUFFLE_PARAM, createNodeSetShuffle.get());
}
return params;
}
}
//Note : This method is added since solrj module does not use Google
// guava library. Also changes committed for SOLR-8765 result in wrong
// error message when "collection" parameter is specified as Null.
// This is because the setCollectionName method is deprecated.
static <T> T checkNotNull(String param, T value) {
if (value == null) {
throw new NullPointerException("Please specify a non-null value for parameter " + param);
}
return value;
}
  /** CREATESNAPSHOT request: creates a named snapshot (commit) on a collection. */
  @SuppressWarnings("serial")
  public static class CreateSnapshot extends AsyncCollectionSpecificAdminRequest {
    protected final String commitName;
    public CreateSnapshot(String collection, String commitName) {
      super(CollectionAction.CREATESNAPSHOT, checkNotNull(CoreAdminParams.COLLECTION ,collection));
      this.commitName = checkNotNull(CoreAdminParams.COMMIT_NAME, commitName);
    }
    public String getCollectionName() {
      return collection;
    }
    public String getCommitName() {
      return commitName;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CoreAdminParams.COLLECTION, collection);
      params.set(CoreAdminParams.COMMIT_NAME, commitName);
      return params;
    }
  }
  /** DELETESNAPSHOT request: deletes a named snapshot from a collection. */
  @SuppressWarnings("serial")
  public static class DeleteSnapshot extends AsyncCollectionSpecificAdminRequest {
    protected final String commitName;
    public DeleteSnapshot (String collection, String commitName) {
      super(CollectionAction.DELETESNAPSHOT, checkNotNull(CoreAdminParams.COLLECTION ,collection));
      this.commitName = checkNotNull(CoreAdminParams.COMMIT_NAME, commitName);
    }
    public String getCollectionName() {
      return collection;
    }
    public String getCommitName() {
      return commitName;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CoreAdminParams.COLLECTION, collection);
      params.set(CoreAdminParams.COMMIT_NAME, commitName);
      return params;
    }
  }
  /** LISTSNAPSHOTS request: lists all snapshots of a collection. */
  @SuppressWarnings("serial")
  public static class ListSnapshots extends AsyncCollectionSpecificAdminRequest {
    public ListSnapshots (String collection) {
      super(CollectionAction.LISTSNAPSHOTS, checkNotNull(CoreAdminParams.COLLECTION ,collection));
    }
    public String getCollectionName() {
      return collection;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CoreAdminParams.COLLECTION, collection);
      return params;
    }
  }
  /**
   * Returns a SolrRequest to create a new shard in a collection
   *
   * @param collection the collection to add the shard to
   * @param shard the name of the new shard (validated; see CreateShard constructor)
   */
  public static CreateShard createShard(String collection, String shard) {
    return new CreateShard(collection, shard);
  }
  // CREATESHARD request
  public static class CreateShard extends AsyncShardSpecificAdminRequest {
    protected String nodeSet;
    protected Properties properties;
    /** Restricts the nodes the new shard's replicas may be placed on. */
    public CreateShard setNodeSet(String nodeSet) {
      this.nodeSet = nodeSet;
      return this;
    }
    public String getNodeSet() {
      return nodeSet;
    }
    public Properties getProperties() {
      return properties;
    }
    public CreateShard setProperties(Properties properties) {
      this.properties = properties;
      return this;
    }
    // Shard name is validated eagerly; invalid names fail here, not at request time.
    private CreateShard(String collection, String shard) {
      super(CollectionAction.CREATESHARD, collection, SolrIdentifierValidator.validateShardName(shard));
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      if (nodeSet != null) {
        params.set(CREATE_NODE_SET_PARAM, nodeSet);
      }
      if (properties != null) {
        addProperties(params, properties);
      }
      return params;
    }
  }
  /**
   * Returns a SolrRequest to split a shard in a collection
   *
   * @param collection the collection containing the shard to split
   */
  public static SplitShard splitShard(String collection) {
    return new SplitShard(collection);
  }
  /**
   * SPLITSHARD request. Either a shard name or a split key must be set before
   * calling getParams(); all other options are optional.
   */
  public static class SplitShard extends AsyncCollectionAdminRequest {
    protected String collection;
    protected String ranges;
    protected String splitKey;
    protected String shard;
    protected String splitMethod;
    protected Boolean splitByPrefix;
    protected Integer numSubShards;
    protected Float splitFuzz;
    private Properties properties;
    private SplitShard(String collection) {
      super(CollectionAction.SPLITSHARD);
      this.collection = checkNotNull(CoreAdminParams.COLLECTION, collection);
    }
    public SplitShard setRanges(String ranges) { this.ranges = ranges; return this; }
    public String getRanges() { return ranges; }
    public Integer getNumSubShards() {
      return numSubShards;
    }
    public SplitShard setNumSubShards(Integer numSubShards) {
      this.numSubShards = numSubShards;
      return this;
    }
    public SplitShard setSplitMethod(String splitMethod) {
      this.splitMethod = splitMethod;
      return this;
    }
    public String getSplitMethod() {
      return splitMethod;
    }
    public SplitShard setSplitFuzz(float splitFuzz) {
      this.splitFuzz = splitFuzz;
      return this;
    }
    public Float getSplitFuzz() {
      return splitFuzz;
    }
    public SplitShard setSplitKey(String splitKey) {
      this.splitKey = splitKey;
      return this;
    }
    public String getSplitKey() {
      return this.splitKey;
    }
    public Properties getProperties() {
      return properties;
    }
    public SplitShard setProperties(Properties properties) {
      this.properties = properties;
      return this;
    }
    public SplitShard setShardName(String shard) {
      this.shard = shard;
      return this;
    }
    public Boolean getSplitByPrefix() {
      return splitByPrefix;
    }
    public SplitShard setSplitByPrefix(Boolean splitByPrefix) {
      this.splitByPrefix = splitByPrefix;
      return this;
    }
    /**
     * Builds the SPLITSHARD parameters.
     * @throws IllegalArgumentException if neither a shard name nor a split key was set
     */
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CollectionAdminParams.COLLECTION, collection);
      if (this.shard == null && this.splitKey == null) {
        throw new IllegalArgumentException("You must set shardname OR splitkey for this request.");
      }
      params.set(CoreAdminParams.SHARD, shard);
      params.set("split.key", this.splitKey);
      params.set(CoreAdminParams.RANGES, ranges);
      params.set(CommonAdminParams.SPLIT_METHOD, splitMethod);
      if(numSubShards != null) {
        params.set("numSubShards", numSubShards);
      }
      if (splitFuzz != null) {
        params.set(CommonAdminParams.SPLIT_FUZZ, String.valueOf(splitFuzz));
      }
      if (splitByPrefix != null) {
        params.set(CommonAdminParams.SPLIT_BY_PREFIX, splitByPrefix);
      }
      if(properties != null) {
        addProperties(params, properties);
      }
      return params;
    }
  }
  /**
   * Returns a SolrRequest to delete a shard from a collection
   *
   * @param collection the collection containing the shard
   * @param shard the shard to delete
   */
  public static DeleteShard deleteShard(String collection, String shard) {
    return new DeleteShard(collection, shard);
  }
  // DELETESHARD request
  public static class DeleteShard extends AsyncShardSpecificAdminRequest {
    private Boolean deleteInstanceDir; // optional: also delete the core's instance dir
    private Boolean deleteDataDir;     // optional: also delete the core's data dir
    private DeleteShard(String collection, String shard) {
      super(CollectionAction.DELETESHARD, collection, shard);
    }
    public Boolean getDeleteInstanceDir() {
      return deleteInstanceDir;
    }
    public DeleteShard setDeleteInstanceDir(Boolean deleteInstanceDir) {
      this.deleteInstanceDir = deleteInstanceDir;
      return this;
    }
    public Boolean getDeleteDataDir() {
      return deleteDataDir;
    }
    public DeleteShard setDeleteDataDir(Boolean deleteDataDir) {
      this.deleteDataDir = deleteDataDir;
      return this;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
      if (deleteInstanceDir != null) {
        params.set(CoreAdminParams.DELETE_INSTANCE_DIR, deleteInstanceDir);
      }
      if (deleteDataDir != null) {
        params.set(CoreAdminParams.DELETE_DATA_DIR, deleteDataDir);
      }
      return params;
    }
  }
  /**
   * Returns a SolrRequest to force a leader election for a shard in a collection
   *
   * WARNING: This may cause data loss if the new leader does not contain updates
   * acknowledged by the old leader. Use only if leadership elections are entirely
   * broken.
   *
   * @param collection the collection containing the shard
   * @param shard the shard whose leader election should be forced
   */
  public static ForceLeader forceLeaderElection(String collection, String shard) {
    return new ForceLeader(collection, shard);
  }
  // FORCELEADER request: no parameters beyond collection and shard.
  public static class ForceLeader extends ShardSpecificAdminRequest {
    private ForceLeader(String collection, String shard) {
      super(CollectionAction.FORCELEADER, collection, shard);
    }
  }
  /**
   * A response object for {@link RequestStatus} requests
   */
  public static class RequestStatusResponse extends CollectionAdminResponse {
    /** Extracts the request state from the "status" section of the response. */
    public RequestStatusState getRequestStatus() {
      @SuppressWarnings({"rawtypes"})
      NamedList innerResponse = (NamedList) getResponse().get("status");
      return RequestStatusState.fromKey((String) innerResponse.get("state"));
    }
  }
  /**
   * Returns a SolrRequest for checking the status of an asynchronous request
   *
   * @param requestId the async request id to check
   * @see CollectionAdminRequest.AsyncCollectionAdminRequest
   */
  public static RequestStatus requestStatus(String requestId) {
    return new RequestStatus(requestId);
  }
  /** Convenience wrapper around {@link RequestStatus#waitFor(SolrClient, long)}. */
  public static void waitForAsyncRequest(String requestId, SolrClient client, long timeout) throws SolrServerException, InterruptedException, IOException {
    requestStatus(requestId).waitFor(client, timeout);
  }
  // REQUESTSTATUS request
  public static class RequestStatus extends CollectionAdminRequest<RequestStatusResponse> {
    protected String requestId = null;
    private RequestStatus(String requestId) {
      super(CollectionAction.REQUESTSTATUS);
      this.requestId = checkNotNull("requestId", requestId);
    }
    public String getRequestId() {
      return this.requestId;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CoreAdminParams.REQUESTID, requestId);
      return params;
    }
    @Override
    protected RequestStatusResponse createResponse(SolrClient client) {
      return new RequestStatusResponse();
    }
    /**
     * Wait until the asynchronous request is either completed or failed, up to a timeout.
     * Polls once per second; when the request reaches COMPLETED or FAILED its stored
     * status is deleted before returning.
     * @param client a SolrClient
     * @param timeoutSeconds the maximum time to wait in seconds
     * @return the last seen state of the request
     */
    public RequestStatusState waitFor(SolrClient client, long timeoutSeconds)
        throws IOException, SolrServerException, InterruptedException {
      long finishTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSeconds);
      RequestStatusState state = RequestStatusState.NOT_FOUND;
      while (System.nanoTime() < finishTime) {
        state = this.process(client).getRequestStatus();
        if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
          // clean up the stored status once the request reaches a terminal state
          deleteAsyncId(requestId).process(client);
          return state;
        }
        TimeUnit.SECONDS.sleep(1);
      }
      return state;
    }
  }
  /**
   * Returns a SolrRequest to delete an asynchronous request status
   *
   * @param requestId the async request id whose stored status should be deleted
   */
  public static DeleteStatus deleteAsyncId(String requestId) {
    return new DeleteStatus(checkNotNull("requestId", requestId), null);
  }
  /**
   * Returns a SolrRequest to delete all stored asynchronous request statuses
   */
  public static DeleteStatus deleteAllAsyncIds() {
    return new DeleteStatus(null, true);
  }
  // DELETESTATUS request: exactly one of requestId / flush must be provided.
  public static class DeleteStatus extends CollectionAdminRequest<CollectionAdminResponse> {
    protected String requestId = null;
    protected Boolean flush = null;
    private DeleteStatus(String requestId, Boolean flush) {
      super(CollectionAction.DELETESTATUS);
      if (requestId == null && flush == null)
        throw new IllegalArgumentException("Either requestid or flush parameter must be specified.");
      if (requestId != null && flush != null)
        throw new IllegalArgumentException("Both requestid and flush parameters can not be specified together.");
      this.requestId = requestId;
      this.flush = flush;
    }
    public String getRequestId() {
      return this.requestId;
    }
    public Boolean getFlush() {
      return this.flush;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      if (requestId != null)
        params.set(CoreAdminParams.REQUESTID, requestId);
      if (flush != null)
        params.set(CollectionAdminParams.FLUSH, flush);
      return params;
    }
    @Override
    protected CollectionAdminResponse createResponse(SolrClient client) {
      return new CollectionAdminResponse();
    }
  }
  // ALIASPROP request
  /**
   * Returns a SolrRequest to add or remove properties from an alias
   * @param aliasName the alias to modify
   */
  public static SetAliasProperty setAliasProperty(String aliasName) {
    return new SetAliasProperty(aliasName);
  }
  public static class SetAliasProperty extends AsyncCollectionAdminRequest {
    private final String aliasName;
    private Map<String,String> properties = new HashMap<>();
    public SetAliasProperty(String aliasName) {
      super(CollectionAction.ALIASPROP);
      // alias name is validated eagerly; invalid names fail here
      this.aliasName = SolrIdentifierValidator.validateAliasName(aliasName);
    }
    /**
     * Queues a property to set on the alias. NOTE(review): presumably a null
     * value removes the property (the factory javadoc mentions removal) —
     * confirm against the server-side ALIASPROP handling.
     */
    public SetAliasProperty addProperty(String key, String value) {
      properties.put(key,value);
      return this;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CoreAdminParams.NAME, aliasName);
      // each queued entry is sent as a "property.<key>" parameter
      properties.forEach((key, value) -> params.set("property." + key, value));
      return params;
    }
  }
  /**
   * Returns a SolrRequest to create a new alias
   * @param aliasName the alias name
   * @param aliasedCollections the collections to alias
   */
  public static CreateAlias createAlias(String aliasName, String aliasedCollections) {
    return new CreateAlias(aliasName, aliasedCollections);
  }
  // CREATEALIAS request
  public static class CreateAlias extends AsyncCollectionAdminRequest {
    protected String aliasName;
    protected String aliasedCollections; // comma-separated list of collection names
    private CreateAlias(String aliasName, String aliasedCollections) {
      super(CollectionAction.CREATEALIAS);
      // alias name is validated eagerly; invalid names fail here
      this.aliasName = SolrIdentifierValidator.validateAliasName(aliasName);
      this.aliasedCollections = checkNotNull("aliasedCollections",aliasedCollections);
    }
    public String getAliasName() {
      return aliasName;
    }
    public String getAliasedCollections() {
      return this.aliasedCollections;
    }
    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      params.set(CoreAdminParams.NAME, aliasName);
      params.set("collections", aliasedCollections);
      return params;
    }
  }
  /**
   * Returns a SolrRequest to create a time routed alias. For time based routing, the start
   * should be a standard Solr timestamp string (possibly with "date math").
   *
   * @param aliasName the name of the alias to create.
   * @param start the start of the routing. A standard Solr date: ISO-8601 or NOW with date math.
   * @param interval date math representing the time duration of each collection (e.g. {@code +1DAY})
   * @param routerField the document field to contain the timestamp to route on
   * @param createCollTemplate Holds options to create a collection. The "name" is ignored.
   * @return the request object; further routing options can be set on it before sending.
   */
  public static CreateTimeRoutedAlias createTimeRoutedAlias(String aliasName, String start,
                                                            String interval,
                                                            String routerField,
                                                            Create createCollTemplate) {
    return new CreateTimeRoutedAlias(aliasName, routerField, start, interval, createCollTemplate);
  }
public static class CreateTimeRoutedAlias extends AsyncCollectionAdminRequest implements RoutedAliasAdminRequest {
    // TODO: This and other commands in this file seem to need to share some sort of constants class with core
    // to allow this stuff not to be duplicated. (this is pasted from CreateAliasCmd.java), however I think
    // a comprehensive cleanup of this for all the requests in this class should be done as a separate ticket.
    /** Param name for the routing start instant. */
    public static final String ROUTER_START = "router.start";
    /** Param name for the duration of each routed collection. */
    public static final String ROUTER_INTERVAL = "router.interval";
    /** Param name for how far into the future a document may be (milliseconds). */
    public static final String ROUTER_MAX_FUTURE = "router.maxFutureMs";
    /** Param name for the preemptive-creation window (date math). */
    public static final String ROUTER_PREEMPTIVE_CREATE_WINDOW = "router.preemptiveCreateMath";
    /** Param name for the auto-delete age (date math). */
    public static final String ROUTER_AUTO_DELETE_AGE = "router.autoDeleteAge";
    private final String aliasName;
    private final String routerField;
    private final String start;
    private final String interval;
    //Optional:
    private TimeZone tz;
    private Integer maxFutureMs;
    private String preemptiveCreateMath;
    private String autoDeleteAge;
    // template whose CREATE params are merged in as defaults by getParams()
    private final Create createCollTemplate;
public CreateTimeRoutedAlias(String aliasName, String routerField, String start, String interval, Create createCollTemplate) {
super(CollectionAction.CREATEALIAS);
this.aliasName = aliasName;
this.start = start;
this.interval = interval;
this.routerField = routerField;
this.createCollTemplate = createCollTemplate;
}
/** Sets the timezone for interpreting any Solr "date math. */
public CreateTimeRoutedAlias setTimeZone(TimeZone tz) {
this.tz = tz;
return this;
}
/** Sets how long into the future (millis) that we will allow a document to pass. */
public CreateTimeRoutedAlias setMaxFutureMs(Integer maxFutureMs) {
this.maxFutureMs = maxFutureMs;
return this;
}
public CreateTimeRoutedAlias setPreemptiveCreateWindow(String preemptiveCreateMath) {
this.preemptiveCreateMath = preemptiveCreateMath;
return this;
}
public CreateTimeRoutedAlias setAutoDeleteAge(String autoDeleteAge) {
this.autoDeleteAge = autoDeleteAge;
return this;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
params.add(CommonParams.NAME, aliasName);
params.add(ROUTER_TYPE_NAME, "time");
params.add(ROUTER_FIELD, routerField);
params.add(ROUTER_START, start);
params.add(ROUTER_INTERVAL, interval);
if (tz != null) {
params.add(CommonParams.TZ, tz.getID());
}
if (maxFutureMs != null) {
params.add(ROUTER_MAX_FUTURE, ""+maxFutureMs);
}
if (preemptiveCreateMath != null) {
params.add(ROUTER_PREEMPTIVE_CREATE_WINDOW, preemptiveCreateMath);
}
if (autoDeleteAge != null) {
params.add(ROUTER_AUTO_DELETE_AGE, autoDeleteAge);
}
// merge the above with collectionParams. Above takes precedence.
ModifiableSolrParams createCollParams = mergeCollParams(createCollTemplate);
return SolrParams.wrapDefaults(params, createCollParams);
}
@Override
public RoutedAliasTypes getType() {
return RoutedAliasTypes.TIME;
}
@Override
public String getRouterField() {
return routerField;
}
@Override
public java.util.List<String> getParamNames() {
return java.util.List.of(ROUTER_TYPE_NAME, ROUTER_FIELD, ROUTER_START, ROUTER_INTERVAL,ROUTER_MAX_FUTURE, ROUTER_PREEMPTIVE_CREATE_WINDOW, ROUTER_AUTO_DELETE_AGE, CommonParams.TZ);
}
@Override
public java.util.List<String> getRequiredParamNames() {
return java.util.List.of(ROUTER_TYPE_NAME, ROUTER_FIELD,ROUTER_START, ROUTER_INTERVAL);
}
}
  /**
   * Returns a SolrRequest to create a category routed alias.
   *
   * @param aliasName the name of the alias to create.
   * @param routerField the document field whose value is used to route on (a category, not a timestamp)
   * @param maxCardinality the maximum number of collections under this CRA
   * @param createCollTemplate Holds options to create a collection. The "name" is ignored.
   */
  public static CreateCategoryRoutedAlias createCategoryRoutedAlias(String aliasName,
                                                                    String routerField,
                                                                    int maxCardinality,
                                                                    Create createCollTemplate) {
    return new CreateCategoryRoutedAlias(aliasName, routerField, maxCardinality, createCollTemplate);
  }
public static class CreateCategoryRoutedAlias extends AsyncCollectionAdminRequest implements RoutedAliasAdminRequest {
public static final String ROUTER_MAX_CARDINALITY = "router.maxCardinality";
public static final String ROUTER_MUST_MATCH = "router.mustMatch";
private final String aliasName;
private final String routerField;
private Integer maxCardinality;
private String mustMatch;
private final Create createCollTemplate;
public CreateCategoryRoutedAlias(String aliasName, String routerField, int maxCardinality, Create createCollTemplate) {
super(CollectionAction.CREATEALIAS);
this.aliasName = aliasName;
this.routerField = routerField;
this.maxCardinality = maxCardinality;
this.createCollTemplate = createCollTemplate;
}
public CreateCategoryRoutedAlias setMustMatch(String regex) {
this.mustMatch = regex;
return this;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
params.add(CommonParams.NAME, aliasName);
params.add(ROUTER_TYPE_NAME, RoutedAliasTypes.CATEGORY.name());
params.add(ROUTER_FIELD, routerField);
params.add(ROUTER_MAX_CARDINALITY, maxCardinality.toString());
if (mustMatch != null) {
params.add(ROUTER_MUST_MATCH, mustMatch);
}
// merge the above with collectionParams. Above takes precedence.
ModifiableSolrParams createCollParams = mergeCollParams(createCollTemplate);
return SolrParams.wrapDefaults(params, createCollParams);
}
@Override
public RoutedAliasTypes getType() {
return RoutedAliasTypes.CATEGORY;
}
@Override
public String getRouterField() {
return routerField;
}
@Override
public java.util.List<String> getParamNames() {
return java.util.List.of(ROUTER_TYPE_NAME, ROUTER_FIELD,ROUTER_MAX_CARDINALITY, ROUTER_MUST_MATCH);
}
@Override
public java.util.List<String> getRequiredParamNames() {
return java.util.List.of(ROUTER_TYPE_NAME, ROUTER_FIELD,ROUTER_MAX_CARDINALITY);
}
}
  /**
   * Common contract for routed alias creation requests so they can be composed into a
   * {@link DimensionalRoutedAlias}.
   */
  public interface RoutedAliasAdminRequest {
    String ROUTER_TYPE_NAME = "router.name";
    String ROUTER_FIELD = "router.field";

    /** The routing type of this request (e.g. time or category). */
    RoutedAliasTypes getType();

    /** The document field used to route on. */
    String getRouterField();

    /** All router-related parameter names this request may emit, required and optional. */
    java.util.List<String> getParamNames();

    /** The subset of {@link #getParamNames()} that must have a value. */
    java.util.List<String> getRequiredParamNames();

    SolrParams getParams();

    /**
     * Copies the params of the given collection-creation template, prefixing each with
     * "create-collection." and dropping the action and name params (the alias machinery
     * supplies its own). Returns empty params when the template is null.
     */
    default ModifiableSolrParams mergeCollParams(Create createCollTemplate) {
      ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // output target
      if (createCollTemplate == null) {
        return createCollParams;
      }
      final SolrParams collParams = createCollTemplate.getParams();
      final Iterator<String> pIter = collParams.getParameterNamesIterator();
      while (pIter.hasNext()) {
        String key = pIter.next();
        // the action and name of the template collection are irrelevant to the alias
        if (key.equals(CollectionParams.ACTION) || key.equals("name")) {
          continue;
        }
        createCollParams.set("create-collection." + key, collParams.getParams(key));
      }
      return createCollParams;
    }
  }
  /**
   * Create a Dimensional Routed alias from two or more routed alias types.
   *
   * @param aliasName The name of the alias
   * @param createCollTemplate a create command that will be used for all collections created
   * @param dims Routed Alias requests. Note that the aliasName and collection templates inside dimensions
   *             will be ignored and may be safely set to null
   * @return An object representing a basic DimensionalRoutedAlias creation request.
   */
  public static DimensionalRoutedAlias createDimensionalRoutedAlias(String aliasName, Create createCollTemplate, RoutedAliasAdminRequest... dims) {
    return new DimensionalRoutedAlias(aliasName, createCollTemplate, dims);
  }
public static class DimensionalRoutedAlias extends AsyncCollectionAdminRequest implements RoutedAliasAdminRequest {
private String aliasName;
private final Create createCollTemplate;
private final RoutedAliasAdminRequest[] dims;
public DimensionalRoutedAlias(String aliasName, Create createCollTemplate, RoutedAliasAdminRequest... dims) {
super(CollectionAction.CREATEALIAS);
this.aliasName = aliasName;
this.createCollTemplate = createCollTemplate;
this.dims = dims;
}
public static void addDimensionIndexIfRequired(Set<String> params, int i, String param) {
params.add(withDimensionIndexIfRequired(param, i));
}
private static String withDimensionIndexIfRequired(String param, int index) {
if (param.startsWith(ROUTER_PREFIX)) {
return ROUTER_PREFIX + index + "." + param.split("\\.")[1];
} else {
return param;
}
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
java.util.List<String> types = new ArrayList<>();
java.util.List<String> fields = new ArrayList<>();
for (int i = 0; i < dims.length; i++) {
RoutedAliasAdminRequest dim = dims[i];
types.add(dim.getType().name());
fields.add(dim.getRouterField());
for (String param : dim.getParamNames()) {
String value = dim.getParams().get(param);
if (value != null) {
params.add(withDimensionIndexIfRequired(param, i), value);
} else {
if (dim.getRequiredParamNames().contains(param)) {
throw new IllegalArgumentException("Dimension of type " + dim.getType() + " requires a value for " + param);
}
}
}
}
params.add(CommonParams.NAME, aliasName);
params.add(ROUTER_TYPE_NAME, "Dimensional[" + String.join(",", types) + "]");
params.add(ROUTER_FIELD, String.join(",", fields));
// merge the above with collectionParams. Above takes precedence.
ModifiableSolrParams createCollParams = mergeCollParams(createCollTemplate);
return SolrParams.wrapDefaults(params, createCollParams);
}
@Override
public RoutedAliasTypes getType() {
throw new UnsupportedOperationException("Dimensions of dimensions are not allowed, the multiverse might collapse!");
}
@Override
public String getRouterField() {
throw new UnsupportedOperationException("Dimensions of dimensions are not allowed, the multiverse might collapse!");
}
@Override
public java.util.List<String> getParamNames() {
throw new UnsupportedOperationException("Dimensions of dimensions are not allowed, the multiverse might collapse!");
}
@Override
public java.util.List<String> getRequiredParamNames() {
throw new UnsupportedOperationException("Dimensions of dimensions are not allowed, the multiverse might collapse!");
}
}
  /**
   * Returns a SolrRequest to delete an alias
   *
   * @param aliasName the name of the alias to delete; must not be null.
   */
  public static DeleteAlias deleteAlias(String aliasName) {
    return new DeleteAlias(aliasName);
  }

  // DELETEALIAS request
  public static class DeleteAlias extends AsyncCollectionAdminRequest {

    protected String aliasName;

    private DeleteAlias(String aliasName) {
      super(CollectionAction.DELETEALIAS);
      this.aliasName = checkNotNull("aliasName",aliasName);
    }

    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
      params.set(CoreAdminParams.NAME, aliasName);
      return params;
    }
  }
  /**
   * Returns a SolrRequest to add a replica of type {@link org.apache.solr.common.cloud.Replica.Type#NRT} to a shard in a collection
   *
   */
  public static AddReplica addReplicaToShard(String collection, String shard) {
    return addReplicaToShard(collection, shard, Replica.Type.NRT);
  }

  /**
   * Returns a SolrRequest to add a replica of the specified type to a shard in a collection.
   * If the replica type is null, the server default will be used.
   *
   * @param collection the collection to add the replica to; must not be null.
   * @param shard the shard to add the replica to; must not be null.
   * @param replicaType the type of replica to add, or null for the server default.
   */
  public static AddReplica addReplicaToShard(String collection, String shard, Replica.Type replicaType) {
    return new AddReplica(collection, checkNotNull(CoreAdminParams.SHARD, shard), null, replicaType);
  }

  /**
   * Returns a SolrRequest to add a replica to a collection using a route key
   *
   * @param collection the collection to add the replica to; must not be null.
   * @param routeKey the route key identifying the shard; must not be null.
   */
  public static AddReplica addReplicaByRouteKey(String collection, String routeKey) {
    return new AddReplica(collection, null, checkNotNull("routeKey",routeKey), null);
  }
  // ADDREPLICA request
  /**
   * ADDREPLICA request. Exactly one of {@code shard} or {@code routeKey} must be supplied
   * (enforced by the private constructor callers and asserted in {@link #getParams()}).
   * All other properties are optional and only sent when explicitly set.
   */
  public static class AddReplica extends AsyncCollectionAdminRequest {

    protected String collection;
    protected String shard;
    protected String node;
    protected String coreName;
    protected String routeKey;
    protected String instanceDir;
    protected String dataDir;
    protected String ulogDir;
    protected Properties properties;
    protected Replica.Type type;
    protected Integer nrtReplicas, tlogReplicas, pullReplicas;
    protected Boolean skipNodeAssignment;
    protected String createNodeSet;

    private AddReplica(String collection, String shard, String routeKey, Replica.Type type) {
      super(CollectionAction.ADDREPLICA);
      this.collection = checkNotNull(CoreAdminParams.COLLECTION, collection);
      this.shard = shard;
      this.routeKey = routeKey;
      this.type = type;
    }

    public Properties getProperties() {
      return properties;
    }

    /** Replaces any previously set properties wholesale. */
    public AddReplica setProperties(Properties properties) {
      this.properties = properties;
      return this;
    }

    /** Adds a single property, lazily creating the Properties holder. */
    public AddReplica withProperty(String key, String value) {
      if (this.properties == null)
        this.properties = new Properties();
      this.properties.setProperty(key, value);
      return this;
    }

    public String getNode() {
      return node;
    }

    public AddReplica setNode(String node) {
      this.node = node;
      return this;
    }

    public AddReplica setSkipNodeAssignment(Boolean skipNodeAssignment) {
      this.skipNodeAssignment = skipNodeAssignment;
      return this;
    }

    public String getRouteKey() {
      return routeKey;
    }

    public String getInstanceDir() {
      return instanceDir;
    }

    public String getUlogDir() {
      return ulogDir;
    }

    public AddReplica setInstanceDir(String instanceDir) {
      this.instanceDir = instanceDir;
      return this;
    }

    public String getDataDir() {
      return dataDir;
    }

    public AddReplica setDataDir(String dataDir) {
      this.dataDir = dataDir;
      return this;
    }

    public AddReplica setType(Replica.Type type) {
      this.type = type;
      return this;
    }

    public AddReplica setCoreName(String coreName) {
      this.coreName = coreName;
      return this;
    }

    public AddReplica setUlogDir(String ulogDir) {
      this.ulogDir = ulogDir;
      return this;
    }

    public String getShard() {
      return shard;
    }

    public Integer getNrtReplicas() {
      return nrtReplicas;
    }

    public AddReplica setNrtReplicas(Integer nrtReplicas) {
      this.nrtReplicas = nrtReplicas;
      return this;
    }

    public Integer getTlogReplicas() {
      return tlogReplicas;
    }

    public AddReplica setTlogReplicas(Integer tlogReplicas) {
      this.tlogReplicas = tlogReplicas;
      return this;
    }

    public Integer getPullReplicas() {
      return pullReplicas;
    }

    public AddReplica setPullReplicas(Integer pullReplicas) {
      this.pullReplicas = pullReplicas;
      return this;
    }

    public String getCreateNodeSet() {
      return createNodeSet;
    }

    public AddReplica setCreateNodeSet(String createNodeSet) {
      this.createNodeSet = createNodeSet;
      return this;
    }

    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
      params.add(CoreAdminParams.COLLECTION, collection);
      // exactly one of routeKey / shard must be set (see the private constructor's callers)
      assert ((null == routeKey) ^ (null == shard));
      if (null != shard) {
        params.add(CoreAdminParams.SHARD, shard);
      }
      if (null != routeKey) {
        params.add(ShardParams._ROUTE_, routeKey);
      }
      if (node != null) {
        params.add(CoreAdminParams.NODE, node);
      }
      if (skipNodeAssignment != null) {
        params.add(SKIP_NODE_ASSIGNMENT, String.valueOf(skipNodeAssignment));
      }
      if (instanceDir != null)  {
        params.add(CoreAdminParams.INSTANCE_DIR, instanceDir);
      }
      if (dataDir != null)  {
        params.add(CoreAdminParams.DATA_DIR, dataDir);
      }
      if (ulogDir != null) {
        params.add(CoreAdminParams.ULOG_DIR, ulogDir);
      }
      if (coreName != null) {
        params.add(CoreAdminParams.NAME, coreName);
      }
      if (type != null) {
        params.add(ZkStateReader.REPLICA_TYPE, type.name());
      }
      if (properties != null) {
        addProperties(params, properties);
      }
      if (nrtReplicas != null)  {
        params.add(NRT_REPLICAS, String.valueOf(nrtReplicas));
      }
      if (tlogReplicas != null)  {
        params.add(TLOG_REPLICAS, String.valueOf(tlogReplicas));
      }
      if (pullReplicas != null)  {
        params.add(PULL_REPLICAS, String.valueOf(pullReplicas));
      }
      if (createNodeSet != null)  {
        params.add(CREATE_NODE_SET_PARAM, createNodeSet);
      }
      return params;
    }
  }
  /**
   * Returns a SolrRequest to delete a replica from a shard in a collection
   */
  public static DeleteReplica deleteReplica(String collection, String shard, String replica) {
    return new DeleteReplica(collection, checkNotNull(CoreAdminParams.SHARD, shard),
        checkNotNull(CoreAdminParams.REPLICA, replica));
  }

  /**
   * Returns a SolrRequest to remove a number of replicas from a specific shard.
   * Equivalent to {@link #deleteReplicasFromShard(String, String, int)}.
   */
  public static DeleteReplica deleteReplica(String collection, String shard, int count) {
    return new DeleteReplica(collection, checkNotNull(CoreAdminParams.SHARD, shard), count);
  }

  /**
   * Returns a SolrRequest to remove a number of replicas from a specific shard
   */
  public static DeleteReplica deleteReplicasFromShard(String collection, String shard, int count) {
    return new DeleteReplica(collection, checkNotNull(CoreAdminParams.SHARD, shard), count);
  }

  /**
   * Returns a SolrRequest to remove a number of replicas from every shard of a collection.
   */
  public static DeleteReplica deleteReplicasFromAllShards(String collection, int count) {
    return new DeleteReplica(collection, count);
  }
  // DELETEREPLICA request
  /**
   * DELETEREPLICA request. Targets either a named replica of a shard, a count of replicas
   * from one shard, or a count of replicas from every shard (depending on the constructor used).
   */
  public static class DeleteReplica extends AsyncCollectionSpecificAdminRequest {

    protected String shard;
    protected String replica;
    protected Boolean onlyIfDown;
    private Boolean deleteDataDir;
    private Boolean deleteInstanceDir;
    private Boolean deleteIndexDir;
    private Integer count;

    private DeleteReplica(String collection, String shard, String replica) {
      super(CollectionAction.DELETEREPLICA, collection);
      this.shard = shard;
      this.replica = replica;
    }

    private DeleteReplica(String collection, String shard, int count) {
      super(CollectionAction.DELETEREPLICA, collection);
      this.shard = shard;
      this.count = count;
    }

    private DeleteReplica(String collection, int count) {
      super(CollectionAction.DELETEREPLICA, collection);
      this.count = count;
    }

    public String getReplica() {
      return this.replica;
    }

    /** When true, the server only deletes the replica if it is down. */
    public DeleteReplica setOnlyIfDown(boolean onlyIfDown) {
      this.onlyIfDown = onlyIfDown;
      return this;
    }

    public Boolean getOnlyIfDown() {
      return this.onlyIfDown;
    }

    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());

      // AsyncCollectionSpecificAdminRequest uses 'name' rather than 'collection'
      // TODO - deal with this inconsistency
      params.remove(CoreAdminParams.NAME);
      params.set(ZkStateReader.COLLECTION_PROP, this.collection);

      if (this.replica != null)
        params.set(ZkStateReader.REPLICA_PROP, this.replica);
      if (this.shard != null)
        params.set(ZkStateReader.SHARD_ID_PROP, this.shard);

      if (onlyIfDown != null) {
        params.set("onlyIfDown", onlyIfDown);
      }
      if (deleteDataDir != null) {
        params.set(CoreAdminParams.DELETE_DATA_DIR, deleteDataDir);
      }
      if (deleteInstanceDir != null) {
        params.set(CoreAdminParams.DELETE_INSTANCE_DIR, deleteInstanceDir);
      }
      if (deleteIndexDir != null) {
        params.set(CoreAdminParams.DELETE_INDEX, deleteIndexDir);
      }
      if (count != null) {
        params.set(COUNT_PROP, count);
      }
      return params;
    }

    public Boolean getDeleteDataDir() {
      return deleteDataDir;
    }

    public DeleteReplica setDeleteDataDir(Boolean deleteDataDir) {
      this.deleteDataDir = deleteDataDir;
      return this;
    }

    public Boolean getDeleteInstanceDir() {
      return deleteInstanceDir;
    }

    public DeleteReplica setDeleteInstanceDir(Boolean deleteInstanceDir) {
      this.deleteInstanceDir = deleteInstanceDir;
      return this;
    }

    public Boolean getDeleteIndexDir() {
      return deleteIndexDir;
    }

    public DeleteReplica setDeleteIndexDir(Boolean deleteIndexDir) {
      this.deleteIndexDir = deleteIndexDir;
      return this;
    }
  }
  /**
   * Returns a SolrRequest to set (or unset) a cluster property
   *
   * @param propertyName the property to set; must not be null.
   * @param propertyValue the value to set, or null to unset the property.
   */
  public static ClusterProp setClusterProperty(String propertyName, String propertyValue) {
    return new ClusterProp(propertyName, propertyValue);
  }
// CLUSTERPROP request
public static class ClusterProp extends CollectionAdminRequest<CollectionAdminResponse> {
private String propertyName;
private String propertyValue;
private ClusterProp(String propertyName, String propertyValue) {
super(CollectionAction.CLUSTERPROP);
this.propertyName = checkNotNull("propertyName",propertyName);
this.propertyValue = propertyValue;
}
public String getPropertyName() {
return this.propertyName;
}
public String getPropertyValue() {
return this.propertyValue;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
params.add(CoreAdminParams.NAME, propertyName);
params.add("val", propertyValue);
return params;
}
@Override
protected CollectionAdminResponse createResponse(SolrClient client) {
return new CollectionAdminResponse();
}
}
  /**
   * Returns a SolrRequest to set (or unset) a property on a specific collection.
   *
   * @param collection the collection to modify
   * @param propertyName the property to set; must not be null.
   * @param propertyValue the value to set, or null to unset the property.
   */
  public static CollectionProp setCollectionProperty(String collection, String propertyName, String propertyValue) {
    return new CollectionProp(collection, propertyName, propertyValue);
  }
// COLLECTIONPROP request
public static class CollectionProp extends AsyncCollectionSpecificAdminRequest {
private String propertyName;
private String propertyValue;
private CollectionProp(String collection, String propertyName, String propertyValue) {
super(CollectionAction.COLLECTIONPROP, collection);
this.propertyName = checkNotNull("propertyName", propertyName);
this.propertyValue = propertyValue;
}
public String getPropertyName() {
return this.propertyName;
}
public String getPropertyValue() {
return this.propertyValue;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
params.add(CollectionAdminParams.PROPERTY_NAME, propertyName);
params.add(CollectionAdminParams.PROPERTY_VALUE, propertyValue);
return params;
}
@Override
protected CollectionAdminResponse createResponse(SolrClient client) {
return new CollectionAdminResponse();
}
}
  /**
   * Returns a SolrRequest to migrate data matching a split key to another collection
   *
   * @param collection the source collection; must not be null.
   * @param targetCollection the collection to migrate data into; must not be null.
   * @param splitKey the route key prefix selecting the documents to migrate; must not be null.
   */
  public static Migrate migrateData(String collection, String targetCollection, String splitKey) {
    return new Migrate(collection, targetCollection, splitKey);
  }
  // MIGRATE request
  /**
   * MIGRATE request. Moves documents matching a split key from one collection to another;
   * forward timeout and extra properties are optional and sent only when set.
   */
  public static class Migrate extends AsyncCollectionAdminRequest {

    private String collection;
    private String targetCollection;
    private String splitKey;
    private Integer forwardTimeout;
    private Properties properties;

    private Migrate(String collection, String targetCollection, String splitKey) {
      super(CollectionAction.MIGRATE);
      this.collection = checkNotNull(CoreAdminParams.COLLECTION, collection);
      this.targetCollection = checkNotNull("targetCollection", targetCollection);
      this.splitKey = checkNotNull("split.key", splitKey);
    }

    public String getCollectionName() {
      return collection;
    }

    public String getTargetCollection() {
      return this.targetCollection;
    }

    public String getSplitKey() {
      return this.splitKey;
    }

    /** Sets the "forward.timeout" request parameter. */
    public Migrate setForwardTimeout(int forwardTimeout) {
      this.forwardTimeout = forwardTimeout;
      return this;
    }

    public Integer getForwardTimeout() {
      return this.forwardTimeout;
    }

    public Migrate setProperties(Properties properties) {
      this.properties = properties;
      return this;
    }

    public Properties getProperties() {
      return this.properties;
    }

    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
      params.set(CoreAdminParams.COLLECTION, collection);
      params.set("target.collection", targetCollection);
      params.set("split.key", splitKey);
      if (forwardTimeout != null) {
        params.set("forward.timeout", forwardTimeout);
      }
      if (properties != null) {
        addProperties(params, properties);
      }
      return params;
    }
  }
  /**
   * Returns a SolrRequest to add a role to a node
   */
  public static AddRole addRole(String node, String role) {
    return new AddRole(node, role);
  }

  // ADDROLE request
  public static class AddRole extends CollectionAdminRoleRequest {
    private AddRole(String node, String role) {
      super(CollectionAction.ADDROLE, node, role);
    }
  }

  /**
   * Returns a SolrRequest to remove a role from a node
   */
  public static RemoveRole removeRole(String node, String role) {
    return new RemoveRole(node, role);
  }

  // REMOVEROLE request
  public static class RemoveRole extends CollectionAdminRoleRequest {
    private RemoveRole(String node, String role) {
      super(CollectionAction.REMOVEROLE, node, role);
    }
  }
  /**
   * Return a SolrRequest to get the Overseer status
   */
  public static OverseerStatus getOverseerStatus() {
    return new OverseerStatus();
  }

  // OVERSEERSTATUS request
  public static class OverseerStatus extends AsyncCollectionAdminRequest {

    public OverseerStatus () {
      super(CollectionAction.OVERSEERSTATUS);
    }
  }
  /**
   * Return a SolrRequest to get the Cluster status
   */
  public static ClusterStatus getClusterStatus() {
    return new ClusterStatus();
  }

  // CLUSTERSTATUS request
  /**
   * CLUSTERSTATUS request. Optionally scoped to a collection, a shard within it,
   * and/or a route key; each filter is only sent when set.
   */
  public static class ClusterStatus extends CollectionAdminRequest<CollectionAdminResponse> {

    protected String shardName = null;
    protected String collection = null;
    protected String routeKey = null;

    public ClusterStatus () {
      super(CollectionAction.CLUSTERSTATUS);
    }

    public ClusterStatus setCollectionName(String collectionName) {
      this.collection = collectionName;
      return this;
    }

    public String getCollectionName() {
      return collection;
    }

    public ClusterStatus setShardName(String shard) {
      this.shardName = shard;
      return this;
    }

    public String getShardName() {
      return this.shardName;
    }

    public String getRouteKey() {
      return routeKey;
    }

    public ClusterStatus setRouteKey(String routeKey) {
      this.routeKey = routeKey;
      return this;
    }

    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
      if (collection != null) {
        params.set(CoreAdminParams.COLLECTION, collection);
      }
      if (shardName != null) {
        params.set(CoreAdminParams.SHARD, shardName);
      }
      if (routeKey != null) {
        params.set(ShardParams._ROUTE_, routeKey);
      }
      return params;
    }

    @Override
    protected CollectionAdminResponse createResponse(SolrClient client) {
      return new CollectionAdminResponse();
    }
  }
  // LISTALIASES request
  /** LISTALIASES request: lists all aliases in the cluster. Takes no parameters. */
  public static class ListAliases extends CollectionAdminRequest<CollectionAdminResponse> {

    public ListAliases() {
      super(CollectionAction.LISTALIASES);
    }

    @Override
    protected CollectionAdminResponse createResponse(SolrClient client) {
      return new CollectionAdminResponse();
    }
  }
  /**
   * Returns a SolrRequest to get a list of collections in the cluster
   *
   * @param client the client used to execute the LIST request
   * @return the collection names reported under the "collections" key of the response
   */
  @SuppressWarnings({"unchecked"})
  public static java.util.List<String> listCollections(SolrClient client) throws IOException, SolrServerException {
    CollectionAdminResponse resp = new List().process(client);
    // the response is an untyped NamedList; "collections" holds the list of names
    return (java.util.List<String>) resp.getResponse().get("collections");
  }

  // LIST request
  /** LIST request: lists all collections in the cluster. Takes no parameters. */
  public static class List extends CollectionAdminRequest<CollectionAdminResponse> {
    public List () {
      super(CollectionAction.LIST);
    }

    @Override
    protected CollectionAdminResponse createResponse(SolrClient client) {
      return new CollectionAdminResponse();
    }
  }
  /**
   * Returns a SolrRequest to add a property to a specific replica
   *
   * @param collection the collection containing the replica
   * @param shard the shard containing the replica
   * @param replica the replica to set the property on; must not be null.
   * @param propertyName the property to set; must not be null.
   * @param propertyValue the value to set; must not be null.
   */
  public static AddReplicaProp addReplicaProperty(String collection, String shard, String replica,
                                                  String propertyName, String propertyValue) {
    return new AddReplicaProp(collection, shard, replica, propertyName, propertyValue);
  }

  // ADDREPLICAPROP request
  public static class AddReplicaProp extends AsyncShardSpecificAdminRequest {

    private String replica;
    private String propertyName;
    private String propertyValue;
    private Boolean shardUnique;

    private AddReplicaProp(String collection, String shard, String replica, String propertyName, String propertyValue) {
      super(CollectionAction.ADDREPLICAPROP, collection, shard);
      this.replica = checkNotNull(CoreAdminParams.REPLICA, replica);
      this.propertyName = checkNotNull("propertyName",propertyName);
      this.propertyValue = checkNotNull("propertyValue",propertyValue);
    }

    public String getReplica() {
      return replica;
    }

    public String getPropertyName() {
      return propertyName;
    }

    public String getPropertyValue() {
      return propertyValue;
    }

    public Boolean getShardUnique() {
      return shardUnique;
    }

    /** When true, the property is kept unique across the replicas of each shard. */
    public AddReplicaProp setShardUnique(Boolean shardUnique) {
      this.shardUnique = shardUnique;
      return this;
    }

    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
      params.set(CoreAdminParams.REPLICA, replica);
      params.set("property", propertyName);
      params.set("property.value", propertyValue);

      if (shardUnique != null) {
        params.set("shardUnique", shardUnique);
      }

      return params;
    }
  }
  /**
   * Returns a SolrRequest to delete a property from a specific replica
   *
   * @param collection the collection containing the replica
   * @param shard the shard containing the replica
   * @param replica the replica to delete the property from; must not be null.
   * @param propertyName the property to delete; must not be null.
   */
  public static DeleteReplicaProp deleteReplicaProperty(String collection, String shard,
                                                        String replica, String propertyName) {
    return new DeleteReplicaProp(collection, shard, replica, propertyName);
  }

  // DELETEREPLICAPROP request
  public static class DeleteReplicaProp extends AsyncShardSpecificAdminRequest {

    private String replica;
    private String propertyName;

    private DeleteReplicaProp(String collection, String shard, String replica, String propertyName) {
      super(CollectionAction.DELETEREPLICAPROP, collection, shard);
      this.replica = checkNotNull(CoreAdminParams.REPLICA, replica);
      this.propertyName = checkNotNull("propertyName",propertyName);
    }

    public String getReplica() {
      return replica;
    }

    public String getPropertyName() {
      return propertyName;
    }

    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
      params.set(CoreAdminParams.REPLICA, replica);
      params.set("property", propertyName);
      return params;
    }
  }
  /**
   * Returns a SolrRequest to balance a replica property across the shards of a collection
   *
   * @param collection the collection to balance the property in; must not be null.
   * @param propertyName the property to balance; must not be null.
   */
  public static BalanceShardUnique balanceReplicaProperty(String collection, String propertyName) {
    return new BalanceShardUnique(collection, propertyName);
  }

  // BALANCESHARDUNIQUE request
  public static class BalanceShardUnique extends AsyncCollectionAdminRequest {

    protected String collection;
    protected String propertyName;
    protected Boolean onlyActiveNodes;
    protected Boolean shardUnique;

    private BalanceShardUnique(String collection, String propertyName) {
      super(CollectionAction.BALANCESHARDUNIQUE);
      this.collection = checkNotNull(CoreAdminParams.COLLECTION, collection);
      this.propertyName = checkNotNull("propertyName",propertyName);
    }

    public String getPropertyName() {
      return propertyName;
    }

    public Boolean getOnlyActiveNodes() {
      return onlyActiveNodes;
    }

    /** When true, only replicas on live nodes are considered during balancing. */
    public BalanceShardUnique setOnlyActiveNodes(Boolean onlyActiveNodes) {
      this.onlyActiveNodes = onlyActiveNodes;
      return this;
    }

    public Boolean getShardUnique() {
      return shardUnique;
    }

    public BalanceShardUnique setShardUnique(Boolean shardUnique) {
      this.shardUnique = shardUnique;
      return this;
    }

    public String getCollection() {
      return collection;
    }

    @Override
    public SolrParams getParams() {
      ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
      params.set(CoreAdminParams.COLLECTION, collection);
      params.set("property", propertyName);
      if (onlyActiveNodes != null)
        params.set("onlyactivenodes", onlyActiveNodes);
      if (shardUnique != null)
        params.set("shardUnique", shardUnique);
      return params;
    }
  }
/**
* A Modify Collection request
*/
public static class Modify extends AsyncCollectionSpecificAdminRequest {
protected Map<String, Object> attributes;
private Modify(String collection, Map<String, Object> attributes) {
super(CollectionAction.MODIFYCOLLECTION, collection);
this.attributes = attributes;
}
/**
* Sets the attributes to be modified using the Modify Collection API.
* <b>Note: this method will overwrite any previously set attributes</b>
*
* @param attributes a map of attribute key vs value
*/
public void setAttributes(Map<String, Object> attributes) {
this.attributes = attributes;
}
/**
* Sets the collection attribute to the given value
*
* @param key a string attribute key, must be one of the entries documented
* in the <a href="https://lucene.apache.org/solr/guide/collections-api.html#modifycollection">Modify Collection API documentation</a>
* @param value the attribute value for the given key
*/
public Modify setAttribute(String key, Object value) {
if (key == null) {
throw new IllegalArgumentException("Attribute key cannot be null for the modify collection API");
}
if (!MODIFIABLE_COLLECTION_PROPERTIES.contains(key)) {
throw new IllegalArgumentException("Unknown attribute key: "
+ key + ". Must be one of: " + MODIFIABLE_COLLECTION_PROPERTIES);
}
if (value == null) {
throw new IllegalArgumentException("Value cannot be null for key: " + key);
}
if (attributes == null) {
attributes = new HashMap<>();
}
attributes.put(key, value);
return this;
}
/**
* Removes the given key from the collection
*
* @param key the string attribute key, must be one of the entries documented
* in the <a href="https://lucene.apache.org/solr/guide/collections-api.html#modifycollection">Modify Collection API documentation</a>
*/
public Modify unsetAttribute(String key) {
if (key == null) {
throw new IllegalArgumentException("Attribute key cannot be null for the modify collection API");
}
if (!MODIFIABLE_COLLECTION_PROPERTIES.contains(key)) {
throw new IllegalArgumentException("Unknown attribute key: "
+ key + ". Must be one of: " + MODIFIABLE_COLLECTION_PROPERTIES);
}
if (attributes == null) {
attributes = new HashMap<>();
}
attributes.put(key, "");
return this;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
params.set(CoreAdminParams.COLLECTION, collection);
for (Map.Entry<String, Object> entry : attributes.entrySet()) {
params.set(entry.getKey(), String.valueOf(entry.getValue()));
}
return params;
}
}
}
| 1 | 39,184 | please don't use wildcard imports | apache-lucene-solr | java |
@@ -219,6 +219,8 @@ describe('Formulas general', () => {
});
it('should recalculate table after changing cell value (by reference)', () => {
+ let sourceDataReference = null;
+
const afterChange = jasmine.createSpy();
const hot = handsontable({
data: getDataSimpleExampleFormulas(), | 1 | describe('Formulas general', () => {
const id = 'testContainer';
beforeEach(function() {
this.$container = $(`<div id="${id}"></div>`).appendTo('body');
});
afterEach(function() {
if (this.$container) {
destroy();
this.$container.remove();
}
});
it('should calculate table (simple example)', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, '#DIV/0!', 12, '\'=SUM(E5)']);
});
it('should calculate table (advanced example)', () => {
const hot = handsontable({
data: getDataAdvancedExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
expect(hot.getDataAtRow(0)).toEqual(['Example #1', '', '', '', '', '', '', '']);
expect(hot.getDataAtRow(1)).toEqual(['Text', 'yellow', 'red', 'blue', 'green', 'pink', 'gray', '']);
expect(hot.getDataAtRow(2)).toEqual(['Yellow dog on green grass', 'yellow', '', '', 'green', '', '', '']);
expect(hot.getDataAtRow(3)).toEqual(['Gray sweater with blue stripes', '', '', 'blue', '', '', 'gray', '']);
expect(hot.getDataAtRow(4)).toEqual(['A red sun on a pink horizon', '', 'red', '', '', 'pink', '', '']);
expect(hot.getDataAtRow(5)).toEqual(['Blue neon signs everywhere', '', '', 'blue', '', '', '', '']);
expect(hot.getDataAtRow(6)).toEqual(['Waves of blue and green', '', '', 'blue', 'green', '', '', '']);
expect(hot.getDataAtRow(7)).toEqual(['Hot pink socks and gray socks', '', '', '', '', 'pink', 'gray', '']);
expect(hot.getDataAtRow(8)).toEqual(['Deep blue eyes', '', '', 'blue', '', '', '', '']);
expect(hot.getDataAtRow(9)).toEqual(['Count of colors', 1, 1, 4, 2, 2, 2, 'SUM: 12']);
expect(hot.getDataAtRow(10)).toEqual(['', '', '', '', '', '', '', '']);
expect(hot.getDataAtRow(11)).toEqual(['Example #2', '', '', '', '', '', '', '']);
expect(hot.getDataAtRow(12)).toEqual(['Name', 'Email', 'Email domain', '', '', '', '', '']);
expect(hot.getDataAtRow(13)).toEqual(['Ann Chang', '[email protected]', 'maaker.com', '', '', '', '', '']);
expect(hot.getDataAtRow(14)).toEqual(['Jan Siuk', '[email protected]', 'yahoo.com', '', '', '', '', '']);
expect(hot.getDataAtRow(15)).toEqual(['Ken Siuk', '[email protected]', 'gmail.com', '', '', '', '', '']);
expect(hot.getDataAtRow(16)).toEqual(['Marcin Kowalski', '[email protected]', 'syndex.pl', '', '', '', '', '']);
});
it('should not treat single equality sign (=) as a formula expression', () => {
const hot = handsontable({
data: [['=', '=3']],
formulas: true,
width: 500,
height: 300
});
expect(hot.getDataAtCell(0, 0)).toBe('=');
expect(hot.getDataAtCell(0, 1)).toBe(3);
hot.setDataAtCell(0, 1, '=');
expect(hot.getDataAtCell(0, 0)).toBe('=');
expect(hot.getDataAtCell(0, 1)).toBe('=');
});
it('should calculate table with semicolon as separator of formula arguments', () => {
const data = getDataSimpleExampleFormulas();
data[2][4] = '=SUM(A4;2;3)';
data[4][2] = '=SUM(B5;E3)';
const hot = handsontable({
data,
formulas: true,
width: 500,
height: 300
});
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, '#DIV/0!', 12, '\'=SUM(E5)']);
});
it('should recalculate table with formulas defined where the next cell is depend on the previous cell', () => {
const afterChange = jasmine.createSpy();
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
afterChange,
});
hot.setDataAtCell(0, 1, '=B5');
hot.setDataAtCell(0, 2, '=B1');
hot.setDataAtCell(0, 3, '=C1');
hot.setDataAtCell(4, 5, '=D1');
expect(hot.getDataAtRow(0)).toEqual([0, 8042, 8042, 8042, 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 8042]);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, '#DIV/0!', 12, 8042]);
hot.setDataAtCell(1, 0, 10);
expect(hot.getDataAtRow(0)).toEqual([0, 6043, 6043, 6043, 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([10, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 6043]);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 6043, 8059, '#DIV/0!', 12, 6043]);
});
it('should throw error while parsing invalid cell coordinates syntax', () => {
const data = getDataSimpleExampleFormulas();
data[0][0] = '=SUM($$A4;2;3)';
data[0][1] = '=A$$$$$1';
data[0][2] = '=A1$';
data[0][3] = '=SUM(A2:D2$)';
const hot = handsontable({
data,
formulas: true,
width: 500,
height: 300
});
hot.setDataAtCell(2, 0, '=A1$');
hot.setDataAtCell(3, 0, '=$A$$1');
expect(hot.getDataAtRow(0)).toEqual(['#ERROR!', '#ERROR!', '#ERROR!', '#ERROR!', 'Mini', '#ERROR!']);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual(['#ERROR!', 5, 2905, 2867, '#ERROR!', '#ERROR!']);
expect(hot.getDataAtRow(3)).toEqual(['#ERROR!', 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, '#ERROR!', '#ERROR!', '#DIV/0!', 12, '\'=SUM(E5)']);
});
it('should return correct values according to plugin state updated by updateSettings()', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.updateSettings({ formulas: false });
expect(hot.getDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, '=SUM(A4,2,3)', '=$B1']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, '=Sum(a2:a5)', '=SUM(B5,E3)', '=A2/B2', 12, '\'=SUM(E5)']);
hot.updateSettings({ formulas: true });
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, '#DIV/0!', 12, '\'=SUM(E5)']);
});
it('should return correct values according to plugin state updated by disablePlugin/enablePlugin methods', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.getPlugin('formulas').disablePlugin();
hot.render();
expect(hot.getDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, '=SUM(A4,2,3)', '=$B1']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, '=Sum(a2:a5)', '=SUM(B5,E3)', '=A2/B2', 12, '\'=SUM(E5)']);
hot.getPlugin('formulas').enablePlugin();
hot.render();
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, '#DIV/0!', 12, '\'=SUM(E5)']);
});
it('should recalculate table after changing cell value (setDataAtCell)', () => {
const afterChange = jasmine.createSpy();
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
afterChange,
});
hot.setDataAtCell(1, 1, 20);
expect(hot.getDataAtRow(0)).toEqual([20, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 20]);
expect(hot.getDataAtRow(1)).toEqual([2009, 20, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, 100.45, 12, '\'=SUM(E5)']);
expect(afterChange.calls.argsFor(1)).toEqual([[[1, 1, 0, 20]], 'edit', void 0, void 0, void 0, void 0]);
});
it('should recalculate table after changing cell value (by reference)', () => {
const afterChange = jasmine.createSpy();
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
afterChange,
});
hot.getSourceData()[1][1] = 20;
hot.getPlugin('formulas').recalculateFull();
hot.render();
expect(hot.getDataAtRow(0)).toEqual([20, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 20]);
expect(hot.getDataAtRow(1)).toEqual([2009, 20, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, 100.45, 12, '\'=SUM(E5)']);
});
it('should recalculate table after changing cell value into formula expression written in lower case', () => {
const afterChange = jasmine.createSpy();
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
afterChange,
});
hot.setDataAtCell(1, 1, '=Sum(a2:A4)');
expect(hot.getDataAtRow(0)).toEqual([6030, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 6030]);
expect(hot.getDataAtRow(1)).toEqual([2009, 6030, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, 0.333167495854063, 12, '\'=SUM(E5)']);
expect(afterChange.calls.argsFor(1)).toEqual([[[1, 1, 0, '=Sum(a2:A4)']], 'edit', void 0, void 0, void 0, void 0]);
});
it('should prevent recalculate table after changing cell value into escaped formula expression', () => {
const afterChange = jasmine.createSpy();
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
afterChange,
});
hot.setDataAtCell(1, 1, '\'=SUM(A2:A4)');
expect(hot.getDataAtRow(0)).toEqual(['\'=SUM(A2:A4)', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '\'=SUM(A2:A4)']);
expect(hot.getDataAtRow(1)).toEqual([2009, '\'=SUM(A2:A4)', 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, '#VALUE!', 12, '\'=SUM(E5)']);
expect(afterChange.calls.argsFor(1)).toEqual([[[1, 1, 0, '\'=SUM(A2:A4)']], 'edit', void 0, void 0, void 0, void 0]);
});
it('should recalculate table after changing cell value from escaped formula expression into valid formula expression', () => {
const afterChange = jasmine.createSpy();
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
afterChange,
});
hot.setDataAtCell(4, 5, hot.getDataAtCell(4, 5).substr(1));
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, '#DIV/0!', 12, 12]);
expect(afterChange.calls.argsFor(1)).toEqual([[[4, 5, '\'=SUM(E5)', '=SUM(E5)']], 'edit', void 0, void 0, void 0, void 0]);
});
it('should recalculate table after changing cell value from primitive value into formula expression', () => {
const afterChange = jasmine.createSpy();
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
afterChange,
});
hot.setDataAtCell(1, 1, '=SUM(A2:A4)');
expect(hot.getDataAtRow(0)).toEqual([6030, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 6030]);
expect(hot.getDataAtRow(1)).toEqual([2009, 6030, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, 10058, 0.333167495854063, 12, '\'=SUM(E5)']);
expect(afterChange.calls.argsFor(1)).toEqual([[[1, 1, 0, '=SUM(A2:A4)']], 'edit', void 0, void 0, void 0, void 0]);
});
it('should recalculate table after changing cell value from formula expression into primitive value', () => {
const afterChange = jasmine.createSpy();
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
afterChange,
});
hot.setDataAtCell(4, 1, 15);
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 15, 2031, '#DIV/0!', 12, '\'=SUM(E5)']);
expect(afterChange.calls.argsFor(1)).toEqual([[[4, 1, '=Sum(a2:a5)', 15]], 'edit', void 0, void 0, void 0, void 0]);
});
it('should recalculate table after changing cell value from formula expression into another formula expression', () => {
const afterChange = jasmine.createSpy();
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
afterChange,
});
hot.setDataAtCell(4, 1, '=SUM(A2:A4)');
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 6030, 8046, '#DIV/0!', 12, '\'=SUM(E5)']);
expect(afterChange.calls.argsFor(1)).toEqual([[[4, 1, '=Sum(a2:a5)', '=SUM(A2:A4)']], 'edit', void 0, void 0, void 0, void 0]);
});
it('should correctly recalculate formulas when precedents cells are located out of table viewport', () => {
const hot = handsontable({
data: getDataForFormulas(0, 'name', ['=B39']),
columns: getColumnsForFormulas(),
formulas: true,
width: 500,
height: 200
});
hot.setDataAtCell(38, 1, 'foo bar');
expect(hot.getDataAtCell(0, 1)).toBe('foo bar');
});
it('should mark cell as #REF! (circular dependency)', () => {
const hot = handsontable({
data: getDataForFormulas(0, 'name', ['=B1']),
columns: getColumnsForFormulas(),
formulas: true,
width: 500,
height: 300
});
expect(hot.getDataAtCell(0, 1)).toBe('#REF!');
});
it('should mark cell as #REF! (out of data table range for columns)', () => {
const hot = handsontable({
data: getDataForFormulas(0, 'name', ['=K1']),
columns: getColumnsForFormulas(),
formulas: true,
width: 500,
height: 300
});
expect(hot.getDataAtCell(0, 1)).toBe('#REF!');
});
it('should mark cell as #REF! (out of data table range for rows)', () => {
const hot = handsontable({
data: getDataForFormulas(0, 'name', ['=A1000']),
columns: getColumnsForFormulas(),
formulas: true,
width: 500,
height: 300
});
expect(hot.getDataAtCell(0, 1)).toBe('#REF!');
});
it('should recalculate external variables', () => {
const hot = handsontable({
data: getDataForFormulas(0, 'name', ['=TEST_1', '=TEST_1&TEST_2', '=SUM(999, TEST_2)', '=TEST_3']),
columns: getColumnsForFormulas(),
formulas: {
variables: {
TEST_1: 'foo',
TEST_2: 12345,
}
},
width: 500,
height: 300
});
expect(hot.getDataAtCell(0, 1)).toBe('foo');
expect(hot.getDataAtCell(1, 1)).toBe('foo12345');
expect(hot.getDataAtCell(2, 1)).toBe(13344);
expect(hot.getDataAtCell(3, 1)).toBe('#NAME?');
});
it('should recalculate external variables (via constructor)', () => {
const hot = handsontable({
data: getDataForFormulas(0, 'name', ['=TEST_1', '=TEST_1&TEST_2', '=SUM(999, TEST_2)', '=TEST_3']),
columns: getColumnsForFormulas(),
formulas: {
variables: {
TEST_1: 'foo',
TEST_2: 12345,
}
},
width: 500,
height: 300
});
expect(hot.getDataAtCell(0, 1)).toBe('foo');
expect(hot.getDataAtCell(1, 1)).toBe('foo12345');
expect(hot.getDataAtCell(2, 1)).toBe(13344);
expect(hot.getDataAtCell(3, 1)).toBe('#NAME?');
});
it('should recalculate external variables (via setVariable method)', () => {
const hot = handsontable({
data: getDataForFormulas(0, 'name', ['=TEST_1', '=TEST_1&TEST_2', '=SUM(999, TEST_2)', '=TEST_3']),
columns: getColumnsForFormulas(),
formulas: {
variables: {
TEST_1: 'foo'
}
},
width: 500,
height: 300
});
hot.getPlugin('formulas').setVariable('TEST_2', 12345);
hot.getPlugin('formulas').recalculateFull();
expect(hot.getDataAtCell(0, 1)).toBe('foo');
expect(hot.getDataAtCell(1, 1)).toBe('foo12345');
expect(hot.getDataAtCell(2, 1)).toBe(13344);
expect(hot.getDataAtCell(3, 1)).toBe('#NAME?');
});
describe('alter table (insert row)', () => {
it('should recalculate table after added new empty rows', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
});
hot.alter('insert_row', 1, 2);
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([null, null, null, null, null, null]);
expect(hot.getDataAtRow(2)).toEqual([null, null, null, null, null, null]);
expect(hot.getDataAtRow(3)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(4)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(5)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(6)).toEqual([2012, 8042, 10058, '#DIV/0!', 12, '\'=SUM(E5)']);
});
it('should recalculate table after changing values into newly added row', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.alter('insert_row', 2, 3);
hot.setDataAtCell(3, 0, 2234);
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([null, null, null, null, null, null]);
expect(hot.getDataAtRow(3)).toEqual([2234, null, null, null, null, null]);
expect(hot.getDataAtRow(4)).toEqual([null, null, null, null, null, null]);
expect(hot.getDataAtRow(5)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(6)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(7)).toEqual([2012, 10276, 12292, '#DIV/0!', 12, '\'=SUM(E5)']);
});
});
describe('alter table (insert column)', () => {
it('should recalculate table after added new empty columns', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
contextMenu: true,
});
hot.alter('insert_col', 1, 2);
expect(hot.getDataAtRow(0)).toEqual([0, null, null, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, null, null, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, null, null, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, null, null, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, null, null, 8042, 10058, '#DIV/0!', 12, '\'=SUM(E5)']);
});
it('should recalculate table after changing values into newly added column', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300,
contextMenu: true,
});
hot.alter('insert_col', 1, 2);
hot.setDataAtCell(1, 3, 2);
expect(hot.getDataAtRow(0)).toEqual([2, null, null, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 2]);
expect(hot.getDataAtRow(1)).toEqual([2009, null, null, 2, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, null, null, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, null, null, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, null, null, 8042, 10058, 1004.5, 12, '\'=SUM(E5)']);
});
});
describe('alter table (remove row)', () => {
it('should recalculate table after removed rows', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.alter('remove_row', 1, 1);
expect(hot.getDataAtRow(0)).toEqual(['#REF!', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '#REF!']);
expect(hot.getDataAtRow(1)).toEqual([2010, 5, 2905, 2867, 2016, 'Maserati']);
expect(hot.getDataAtRow(2)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(3)).toEqual([2012, 6033, 8049, '#REF!', 12, '\'=SUM(E5)']);
});
it('should recalculate table and replace coordinates in formula expressions into #REF! value (removing 2 rows)', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.alter('remove_row', 1, 2);
expect(hot.getSourceDataAtRow(0)).toEqual(['=#REF!', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
expect(hot.getSourceDataAtRow(1)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getSourceDataAtRow(2)).toEqual([2012, '=SUM(A2:A3)', '=SUM(B3,#REF!)', '=#REF!/#REF!', 12, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual(['#REF!', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '#REF!']);
expect(hot.getDataAtRow(1)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(2)).toEqual([2012, 4023, '#REF!', '#REF!', 12, '\'=SUM(E5)']);
});
it('should recalculate table and replace coordinates in formula expressions into #REF! value (removing first 4 rows)', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.alter('remove_row', 0, 4);
expect(hot.getSourceDataAtRow(0)).toEqual([2012, '=SUM(A1:A1)', '=SUM(B1,#REF!)', '=#REF!/#REF!', 12, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual([2012, 2012, '#REF!', '#REF!', 12, '\'=SUM(E5)']);
});
it('should recalculate table and update formula expression after removing rows intersected on the bottom of cell range', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.alter('insert_row', 3, 2);
hot.setDataAtCell(6, 1, '=SUM(A2:A4)');
hot.alter('remove_row', 2, 3);
expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getSourceDataAtRow(2)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getSourceDataAtRow(3)).toEqual([2012, '=SUM(A2:A2)', '=SUM(B4,#REF!)', '=A2/B2', 12, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(3)).toEqual([2012, 2009, '#REF!', '#DIV/0!', 12, '\'=SUM(E5)']);
});
it('should recalculate table and update formula expression after removing rows intersected on the top of cell range', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.setDataAtCell(4, 1, '=SUM(A2:A4)');
hot.alter('remove_row', 0, 2);
expect(hot.getSourceDataAtRow(0)).toEqual([2010, 5, 2905, 2867, '=SUM(A2,2,3)', '=#REF!']);
expect(hot.getSourceDataAtRow(1)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getSourceDataAtRow(2)).toEqual([2012, '=SUM(A1:A2)', '=SUM(B3,E1)', '=#REF!/#REF!', 12, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual([2010, 5, 2905, 2867, 2016, '#REF!']);
expect(hot.getDataAtRow(1)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(2)).toEqual([2012, 4021, 6037, '#REF!', 12, '\'=SUM(E5)']);
});
it('should recalculate table and update formula expression after removing rows contains whole cell range', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.alter('insert_row', 3, 2);
hot.setDataAtCell(6, 1, '=SUM(A2:A4)');
hot.alter('remove_row', 0, 4);
expect(hot.getSourceDataAtRow(0)).toEqual([null, null, null, null, null, null]);
expect(hot.getSourceDataAtRow(1)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getSourceDataAtRow(2)).toEqual([2012, '=SUM(#REF!)', '=SUM(B3,#REF!)', '=#REF!/#REF!', 12, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual([null, null, null, null, null, null]);
expect(hot.getDataAtRow(1)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(2)).toEqual([2012, '#REF!', '#REF!', '#REF!', 12, '\'=SUM(E5)']);
});
});
describe('alter table (remove column)', () => {
it('should recalculate table after removed columns', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.alter('remove_col', 1, 1);
expect(hot.getSourceDataAtRow(0)).toEqual(['=#REF!', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
expect(hot.getSourceDataAtRow(1)).toEqual([2009, 2941, 4303, 354, 5814]);
expect(hot.getSourceDataAtRow(2)).toEqual([2010, 2905, 2867, '=SUM(A4,2,3)', '=#REF!']);
expect(hot.getSourceDataAtRow(3)).toEqual([2011, 2517, 4822, 552, 6127]);
expect(hot.getSourceDataAtRow(4)).toEqual([2012, '=SUM(#REF!,D3)', '=A2/#REF!', 12, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual(['#REF!', 'Mazda', 'Mercedes', 'Mini', '#REF!']);
expect(hot.getDataAtRow(1)).toEqual([2009, 2941, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 2905, 2867, 2016, '#REF!']);
expect(hot.getDataAtRow(3)).toEqual([2011, 2517, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, '#REF!', '#REF!', 12, '\'=SUM(E5)']);
});
it('should recalculate table and replace coordinates in formula expressions into #REF! value (removing 2 columns)', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.alter('remove_col', 1, 2);
expect(hot.getSourceDataAtRow(0)).toEqual(['=#REF!', 'Mercedes', 'Mini', '=A$1']);
expect(hot.getSourceDataAtRow(1)).toEqual([2009, 4303, 354, 5814]);
expect(hot.getSourceDataAtRow(2)).toEqual([2010, 2867, '=SUM(A4,2,3)', '=#REF!']);
expect(hot.getSourceDataAtRow(3)).toEqual([2011, 4822, 552, 6127]);
expect(hot.getSourceDataAtRow(4)).toEqual([2012, '=A2/#REF!', 12, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual(['#REF!', 'Mercedes', 'Mini', '#REF!']);
expect(hot.getDataAtRow(1)).toEqual([2009, 4303, 354, 5814]);
expect(hot.getDataAtRow(2)).toEqual([2010, 2867, 2016, '#REF!']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, '#REF!', 12, '\'=SUM(E5)']);
});
it('should recalculate table and replace coordinates in formula expressions into #REF! value (removing first 4 columns)', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.alter('remove_col', 0, 4);
expect(hot.getSourceDataAtRow(0)).toEqual(['Mini', '=#REF!']);
expect(hot.getSourceDataAtRow(1)).toEqual([354, 5814]);
expect(hot.getSourceDataAtRow(2)).toEqual(['=SUM(#REF!,2,3)', '=#REF!']);
expect(hot.getSourceDataAtRow(3)).toEqual([552, 6127]);
expect(hot.getSourceDataAtRow(4)).toEqual([12, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual(['Mini', '#REF!']);
expect(hot.getDataAtRow(1)).toEqual([354, 5814]);
expect(hot.getDataAtRow(2)).toEqual(['#REF!', '#REF!']);
expect(hot.getDataAtRow(3)).toEqual([552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([12, '\'=SUM(E5)']);
});
it('should recalculate table and update formula expression after removing columns intersected on the right of cell range', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.setDataAtCell(1, 5, '=Sum(B2:D2)');
hot.alter('remove_col', 2, 3);
expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', '=A$1']);
expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, '=SUM(B2:B2)']);
expect(hot.getSourceDataAtRow(2)).toEqual([2010, 5, '=$B1']);
expect(hot.getSourceDataAtRow(3)).toEqual([2011, 4, 6127]);
expect(hot.getSourceDataAtRow(4)).toEqual([2012, '=SUM(A2:A5)', '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual([0, 'Maserati', 0]);
expect(hot.getDataAtRow(1)).toEqual([2009, 0, 0]);
expect(hot.getDataAtRow(2)).toEqual([2010, 5, 'Maserati']);
expect(hot.getDataAtRow(3)).toEqual([2011, 4, 6127]);
expect(hot.getDataAtRow(4)).toEqual([2012, 8042, '\'=SUM(E5)']);
});
it('should recalculate table and update formula expression after removing columns intersected on the left of cell range', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.setDataAtCell(1, 5, '=Sum(B2:D2)');
hot.alter('remove_col', 0, 3);
expect(hot.getSourceDataAtRow(0)).toEqual(['Mercedes', 'Mini', '=#REF!']);
expect(hot.getSourceDataAtRow(1)).toEqual([4303, 354, '=SUM(A2:A2)']);
expect(hot.getSourceDataAtRow(2)).toEqual([2867, '=SUM(#REF!,2,3)', '=#REF!']);
expect(hot.getSourceDataAtRow(3)).toEqual([4822, 552, 6127]);
expect(hot.getSourceDataAtRow(4)).toEqual(['=#REF!/#REF!', 12, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual(['Mercedes', 'Mini', '#REF!']);
expect(hot.getDataAtRow(1)).toEqual([4303, 354, 4303]);
expect(hot.getDataAtRow(2)).toEqual([2867, '#REF!', '#REF!']);
expect(hot.getDataAtRow(3)).toEqual([4822, 552, 6127]);
expect(hot.getDataAtRow(4)).toEqual(['#REF!', 12, '\'=SUM(E5)']);
});
it('should recalculate table and update formula expression after removing columns contains whole cell range', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.setDataAtCell(1, 5, '=Sum(B2:D2)');
hot.alter('remove_col', 0, 4);
expect(hot.getSourceDataAtRow(0)).toEqual(['Mini', '=#REF!']);
expect(hot.getSourceDataAtRow(1)).toEqual([354, '=SUM(#REF!)']);
expect(hot.getSourceDataAtRow(2)).toEqual(['=SUM(#REF!,2,3)', '=#REF!']);
expect(hot.getSourceDataAtRow(3)).toEqual([552, 6127]);
expect(hot.getSourceDataAtRow(4)).toEqual([12, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual(['Mini', '#REF!']);
expect(hot.getDataAtRow(1)).toEqual([354, '#REF!']);
expect(hot.getDataAtRow(2)).toEqual(['#REF!', '#REF!']);
expect(hot.getDataAtRow(3)).toEqual([552, 6127]);
expect(hot.getDataAtRow(4)).toEqual([12, '\'=SUM(E5)']);
});
});
describe('alter table (mixed operations)', () => {
it('should recalculate table and replace coordinates in formula expressions', () => {
const hot = handsontable({
data: getDataSimpleExampleFormulas(),
formulas: true,
width: 500,
height: 300
});
hot.alter('remove_col', 3);
hot.alter('remove_row', 2);
hot.alter('remove_row', 2);
hot.alter('insert_row', 0);
hot.alter('remove_col', 3);
hot.alter('insert_col', 3);
// Make sure that formulas are shifted correctly by recalculate whole table from scratch (after sheet altering)
hot.getPlugin('formulas').recalculateFull();
hot.render();
expect(hot.getSourceDataAtRow(0)).toEqual([null, null, null, null, null]);
expect(hot.getSourceDataAtRow(1)).toEqual(['=$B$3', 'Maserati', 'Mazda', null, '=A$2']);
expect(hot.getSourceDataAtRow(2)).toEqual([2009, 0, 2941, null, 5814]);
expect(hot.getSourceDataAtRow(3)).toEqual([2012, '=SUM(A3:A4)', '=SUM(B4,#REF!)', null, '\'=SUM(E5)']);
expect(hot.getDataAtRow(0)).toEqual([null, null, null, null, null]);
expect(hot.getDataAtRow(1)).toEqual([0, 'Maserati', 'Mazda', null, 0]);
expect(hot.getDataAtRow(2)).toEqual([2009, 0, 2941, null, 5814]);
expect(hot.getDataAtRow(3)).toEqual([2012, 4021, '#REF!', null, '\'=SUM(E5)']);
});
});
// Verifies cooperation between the formulas plugin and the UndoRedo plugin:
// undoing/redoing cell edits and structural changes must restore both the
// formula expressions in the source data and their shifted cell coordinates.
describe('undo/redo', () => {
  it('should restore previous edited formula expression and recalculate table after that', () => {
    const hot = handsontable({
      data: getDataSimpleExampleFormulas(),
      formulas: true,
      width: 500,
      height: 300
    });

    hot.setDataAtCell(0, 5, '=B5');
    hot.undo();

    // Undo restores the original expression and its evaluated value.
    expect(hot.getSourceDataAtCell(0, 5)).toBe('=A$1');
    expect(hot.getDataAtCell(0, 5)).toBe(0);

    hot.redo();

    // Redo re-applies the edited expression and recalculates its result.
    expect(hot.getSourceDataAtCell(0, 5)).toBe('=B5');
    expect(hot.getDataAtCell(0, 5)).toBe(8042);
  });
  it('should restore previous state after alter table (mixed insert operations)', () => {
    const hot = handsontable({
      data: getDataSimpleExampleFormulas(),
      formulas: true,
      width: 500,
      height: 300,
      contextMenu: true,
    });

    // Four structural changes, then unwind them one undo at a time and
    // check that formula coordinates roll back step by step.
    hot.alter('insert_row', 1, 3);
    hot.alter('insert_col', 1);
    hot.alter('insert_col', 4, 2);
    hot.alter('insert_row', 5);
    hot.undo();

    // State after undoing the last insert_row (5): two extra column groups remain.
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$C$5', null, 'Maserati', 'Mazda', null, null, 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([null, null, null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(2)).toEqual([null, null, null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(3)).toEqual([null, null, null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2009, null, 0, 2941, null, null, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(5)).toEqual([2010, null, 5, 2905, null, null, 2867, '=SUM(A7,2,3)', '=$C1']);
    expect(hot.getSourceDataAtRow(6)).toEqual([2011, null, 4, 2517, null, null, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(7)).toEqual([2012, null, '=SUM(A5:A8)', '=SUM(C8,H6)', null, null, '=A5/C5', 12, '\'=SUM(E5)']);

    hot.undo();

    // State after undoing insert_col(4, 2): references shift back (H6 -> F6).
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$C$5', null, 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(2)).toEqual([null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(3)).toEqual([null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2009, null, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(5)).toEqual([2010, null, 5, 2905, 2867, '=SUM(A7,2,3)', '=$C1']);
    expect(hot.getSourceDataAtRow(6)).toEqual([2011, null, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(7)).toEqual([2012, null, '=SUM(A5:A8)', '=SUM(C8,F6)', '=A5/C5', 12, '\'=SUM(E5)']);

    hot.undo();

    // State after undoing insert_col(1): column references drop back (C -> B).
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$5', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(2)).toEqual([null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(3)).toEqual([null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(5)).toEqual([2010, 5, 2905, 2867, '=SUM(A7,2,3)', '=$B1']);
    expect(hot.getSourceDataAtRow(6)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(7)).toEqual([2012, '=SUM(A5:A8)', '=SUM(B8,E6)', '=A5/B5', 12, '\'=SUM(E5)']);

    hot.undo();

    // State after undoing insert_row(1, 3): fully back to the original sheet.
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(2)).toEqual([2010, 5, 2905, 2867, '=SUM(A4,2,3)', '=$B1']);
    expect(hot.getSourceDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2012, '=SUM(A2:A5)', '=SUM(B5,E3)', '=A2/B2', 12, '\'=SUM(E5)']);
  });
  it('should redo into the next state after alter table (mixed insert operations)', () => {
    const hot = handsontable({
      data: getDataSimpleExampleFormulas(),
      formulas: true,
      width: 500,
      height: 300,
      contextMenu: true,
    });

    // Apply four structural changes, rewind them all, then replay with redo
    // and check each intermediate state of the formula coordinates.
    hot.alter('insert_row', 1, 3);
    hot.alter('insert_col', 1);
    hot.alter('insert_col', 4, 2);
    hot.alter('insert_row', 5);
    hot.undo();
    hot.undo();
    hot.undo();
    hot.undo();

    // Fully rewound: the original sheet.
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(2)).toEqual([2010, 5, 2905, 2867, '=SUM(A4,2,3)', '=$B1']);
    expect(hot.getSourceDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2012, '=SUM(A2:A5)', '=SUM(B5,E3)', '=A2/B2', 12, '\'=SUM(E5)']);

    hot.redo();

    // Redo insert_row(1, 3): row references shift down (A2 -> A5 etc.).
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$5', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(2)).toEqual([null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(3)).toEqual([null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(5)).toEqual([2010, 5, 2905, 2867, '=SUM(A7,2,3)', '=$B1']);
    expect(hot.getSourceDataAtRow(6)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(7)).toEqual([2012, '=SUM(A5:A8)', '=SUM(B8,E6)', '=A5/B5', 12, '\'=SUM(E5)']);

    hot.redo();

    // Redo insert_col(1): column references shift right (B -> C).
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$C$5', null, 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(2)).toEqual([null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(3)).toEqual([null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2009, null, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(5)).toEqual([2010, null, 5, 2905, 2867, '=SUM(A7,2,3)', '=$C1']);
    expect(hot.getSourceDataAtRow(6)).toEqual([2011, null, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(7)).toEqual([2012, null, '=SUM(A5:A8)', '=SUM(C8,F6)', '=A5/C5', 12, '\'=SUM(E5)']);

    hot.redo();

    // Redo insert_col(4, 2): further shift of column references (F6 -> H6).
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$C$5', null, 'Maserati', 'Mazda', null, null, 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([null, null, null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(2)).toEqual([null, null, null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(3)).toEqual([null, null, null, null, null, null, null, null, null]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2009, null, 0, 2941, null, null, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(5)).toEqual([2010, null, 5, 2905, null, null, 2867, '=SUM(A7,2,3)', '=$C1']);
    expect(hot.getSourceDataAtRow(6)).toEqual([2011, null, 4, 2517, null, null, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(7)).toEqual([2012, null, '=SUM(A5:A8)', '=SUM(C8,H6)', null, null, '=A5/C5', 12, '\'=SUM(E5)']);
  });
  it('should restore previous state after alter table (mixed remove operations)', () => {
    const hot = handsontable({
      data: getDataSimpleExampleFormulas(),
      formulas: true,
      width: 500,
      height: 300,
      contextMenu: true,
    });

    // Four removal operations, then unwind them one undo at a time.
    hot.alter('remove_row', 2);
    hot.alter('remove_col', 2, 2);
    hot.alter('remove_row', 0, 2);
    hot.alter('remove_col', 3);
    hot.undo();

    expect(hot.getSourceDataAtRow(0)).toEqual([2011, 4, 552, 6127]);
    expect(hot.getSourceDataAtRow(1)).toEqual([2012, '=SUM(A1:A2)', 12, '\'=SUM(E5)']);

    hot.undo();

    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 354, 5814]);
    expect(hot.getSourceDataAtRow(2)).toEqual([2011, 4, 552, 6127]);
    expect(hot.getSourceDataAtRow(3)).toEqual([2012, '=SUM(A2:A4)', 12, '\'=SUM(E5)']);

    hot.undo();

    // A reference removed by remove_row(2) stays broken (#REF!) after undoing
    // the later operations — only the final undo below restores it.
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(2)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(3)).toEqual([2012, '=SUM(A2:A4)', '=SUM(B4,#REF!)', '=A2/B2', 12, '\'=SUM(E5)']);

    hot.undo();

    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(2)).toEqual([2010, 5, 2905, 2867, '=SUM(A4,2,3)', '=$B1']);
    expect(hot.getSourceDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2012, '=Sum(a2:a5)', '=SUM(B5,E3)', '=A2/B2', 12, '\'=SUM(E5)']);
  });
  it('should redo into the next state after alter table (mixed remove operations)', () => {
    const hot = handsontable({
      data: getDataSimpleExampleFormulas(),
      formulas: true,
      width: 500,
      height: 300,
      contextMenu: true,
    });

    // Apply four removals, rewind them all, then replay with redo and check
    // each intermediate state of the formula coordinates.
    hot.alter('remove_row', 2);
    hot.alter('remove_col', 2, 2);
    hot.alter('remove_row', 0, 2);
    hot.alter('remove_col', 3);
    hot.undo();
    hot.undo();
    hot.undo();
    hot.undo();

    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(2)).toEqual([2010, 5, 2905, 2867, '=SUM(A4,2,3)', '=$B1']);
    expect(hot.getSourceDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2012, '=Sum(a2:a5)', '=SUM(B5,E3)', '=A2/B2', 12, '\'=SUM(E5)']);

    hot.redo();

    // Redo remove_row(2): reference into the removed row becomes #REF!.
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(2)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(3)).toEqual([2012, '=SUM(A2:A4)', '=SUM(B4,#REF!)', '=A2/B2', 12, '\'=SUM(E5)']);

    hot.redo();

    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 354, 5814]);
    expect(hot.getSourceDataAtRow(2)).toEqual([2011, 4, 552, 6127]);
    expect(hot.getSourceDataAtRow(3)).toEqual([2012, '=SUM(A2:A4)', 12, '\'=SUM(E5)']);

    hot.redo();

    expect(hot.getSourceDataAtRow(0)).toEqual([2011, 4, 552, 6127]);
    expect(hot.getSourceDataAtRow(1)).toEqual([2012, '=SUM(A1:A2)', 12, '\'=SUM(E5)']);
  });
});
// Verifies that the formulas plugin keeps expressions valid when the rows are
// reordered by the ColumnSorting plugin: source data stays untouched while the
// rendered (visual) data reflects the sorted order and recalculated results.
describe('column sorting', () => {
  // Fixed: "theirs" -> "their" in the test title.
  it('should recalculate all formulas and update their cell coordinates if needed', () => {
    const hot = handsontable({
      data: getDataSimpleExampleFormulas(),
      formulas: true,
      columnSorting: true,
      width: 500,
      height: 300
    });

    hot.updateSettings({ columnSorting: { initialConfig: { column: 2, sortOrder: 'asc' } } });

    // source data is not involved in the translation process
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(2)).toEqual([2010, 5, 2905, 2867, '=SUM(A3,2,3)', '=#REF!']);
    expect(hot.getSourceDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2012, '=SUM(A1:A4)', '=SUM(B4,E2)', '=A1/B1', 12, '\'=SUM(E5)']);
    expect(hot.getDataAtRow(0)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getDataAtRow(1)).toEqual([2010, 5, 2905, 2867, 2014, '#REF!']);
    expect(hot.getDataAtRow(2)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getDataAtRow(3)).toEqual([2012, 8042, 10056, 502.75, 12, '\'=SUM(E5)']);
    expect(hot.getDataAtRow(4)).toEqual([5, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 2011]);

    // Re-sort by another column; some references now point outside the sheet
    // and degrade to #REF! errors.
    hot.updateSettings({ columnSorting: { initialConfig: { column: 5, sortOrder: 'desc' } } });

    // source data is not involved in the translation process
    expect(hot.getSourceDataAtRow(0)).toEqual(['=$B$2', 'Maserati', 'Mazda', 'Mercedes', 'Mini', '=A$1']);
    expect(hot.getSourceDataAtRow(1)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getSourceDataAtRow(2)).toEqual([2010, 5, 2905, 2867, '=SUM(A3,2,3)', '=#REF!']);
    expect(hot.getSourceDataAtRow(3)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getSourceDataAtRow(4)).toEqual([2012, '=SUM(#REF!)', '=SUM(B1,#REF!)', '=#REF!/#REF!', 12, '\'=SUM(E5)']);
    expect(hot.getDataAtRow(0)).toEqual([2012, '#REF!', '#REF!', '#REF!', 12, '\'=SUM(E5)']);
    expect(hot.getDataAtRow(1)).toEqual([2010, 5, 2905, 2867, 2016, '#REF!']);
    expect(hot.getDataAtRow(2)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
    expect(hot.getDataAtRow(3)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
    expect(hot.getDataAtRow(4)).toEqual([5, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 2012]);
  });
  it('should recalculate formula after precedent cells value was changed', (done) => {
    const hot = handsontable({
      data: getDataSimpleExampleFormulas(),
      formulas: true,
      columnSorting: true,
      width: 500,
      height: 300
    });

    hot.updateSettings({ columnSorting: { initialConfig: { column: 2, sortOrder: 'asc' } } });

    // Wait for sorting to settle, then mutate precedent cells one by one and
    // check that dependent formulas are recalculated each time.
    setTimeout(() => {
      hot.setDataAtCell(4, 0, '');

      expect(hot.getDataAtRow(0)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
      expect(hot.getDataAtRow(1)).toEqual([2010, 5, 2905, 2867, 2014, '#REF!']);
      expect(hot.getDataAtRow(2)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
      expect(hot.getDataAtRow(3)).toEqual([2012, 8042, 10056, 502.75, 12, '\'=SUM(E5)']);
      expect(hot.getDataAtRow(4)).toEqual(['', 'Maserati', 'Mazda', 'Mercedes', 'Mini', 2011]);

      hot.setDataAtCell(0, 0, 1);

      expect(hot.getDataAtRow(0)).toEqual([1, 4, 2517, 4822, 552, 6127]);
      expect(hot.getDataAtRow(1)).toEqual([2010, 5, 2905, 2867, 2014, '#REF!']);
      expect(hot.getDataAtRow(2)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
      expect(hot.getDataAtRow(3)).toEqual([2012, 6032, 8046, 0.25, 12, '\'=SUM(E5)']);
      expect(hot.getDataAtRow(4)).toEqual(['', 'Maserati', 'Mazda', 'Mercedes', 'Mini', 1]);

      hot.setDataAtCell(1, 0, 2);

      expect(hot.getDataAtRow(0)).toEqual([1, 4, 2517, 4822, 552, 6127]);
      expect(hot.getDataAtRow(1)).toEqual([2, 5, 2905, 2867, 2014, '#REF!']);
      expect(hot.getDataAtRow(2)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
      expect(hot.getDataAtRow(3)).toEqual([2012, 4024, 6038, 0.25, 12, '\'=SUM(E5)']);
      expect(hot.getDataAtRow(4)).toEqual(['', 'Maserati', 'Mazda', 'Mercedes', 'Mini', 1]);

      hot.setDataAtCell(2, 0, 3);

      expect(hot.getDataAtRow(0)).toEqual([1, 4, 2517, 4822, 552, 6127]);
      expect(hot.getDataAtRow(1)).toEqual([2, 5, 2905, 2867, 8, '#REF!']);
      expect(hot.getDataAtRow(2)).toEqual([3, 0, 2941, 4303, 354, 5814]);
      expect(hot.getDataAtRow(3)).toEqual([2012, 2018, 2026, 0.25, 12, '\'=SUM(E5)']);
      expect(hot.getDataAtRow(4)).toEqual(['', 'Maserati', 'Mazda', 'Mercedes', 'Mini', 1]);

      hot.setDataAtCell(3, 0, 4);

      expect(hot.getDataAtRow(0)).toEqual([1, 4, 2517, 4822, 552, 6127]);
      expect(hot.getDataAtRow(1)).toEqual([2, 5, 2905, 2867, 8, '#REF!']);
      expect(hot.getDataAtRow(2)).toEqual([3, 0, 2941, 4303, 354, 5814]);
      expect(hot.getDataAtRow(3)).toEqual([4, 10, 18, 0.25, 12, '\'=SUM(E5)']);
      expect(hot.getDataAtRow(4)).toEqual(['', 'Maserati', 'Mazda', 'Mercedes', 'Mini', 1]);
      done();
    }, 200);
  });
  // Fixed: typo "corectly" -> "correctly" in the test title.
  it('should correctly recalculate formulas after changing formula expression in sorted cell', (done) => {
    const hot = handsontable({
      data: getDataSimpleExampleFormulas(),
      formulas: true,
      columnSorting: true,
      width: 500,
      height: 300
    });

    hot.updateSettings({ columnSorting: { initialConfig: { column: 2, sortOrder: 'asc' } } });

    // Wait for sorting to settle, then overwrite a formula in a sorted row and
    // check that the new expression is evaluated against the visual order.
    setTimeout(() => {
      hot.setDataAtCell(3, 1, '=SUM(B1:B3)');

      expect(hot.getDataAtRow(0)).toEqual([2011, 4, 2517, 4822, 552, 6127]);
      expect(hot.getDataAtRow(1)).toEqual([2010, 5, 2905, 2867, 2014, '#REF!']);
      expect(hot.getDataAtRow(2)).toEqual([2009, 0, 2941, 4303, 354, 5814]);
      expect(hot.getDataAtRow(3)).toEqual([2012, 9, 2023, 502.75, 12, '\'=SUM(E5)']);
      expect(hot.getDataAtRow(4)).toEqual([5, 'Maserati', 'Mazda', 'Mercedes', 'Mini', 2011]);
      done();
    }, 200);
  });
});
});
| 1 | 16,434 | Should we even fix this test? IMO it should be refactored to check that values are recalculated after `setSourceDataAtCell` and `setSourceDataAtRowProp` calls. This will be the correct way to alter the data since now and we're not testing this use case. | handsontable-handsontable | js |
@@ -46,11 +46,6 @@ class ModelRole(enum.IntEnum):
item = Qt.UserRole
-
-# Remember the last used directory
-last_used_directory = None
-
-
# All REFRESH_INTERVAL milliseconds, speeds will be recalculated and downloads
# redrawn.
_REFRESH_INTERVAL = 500 | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2019 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Shared QtWebKit/QtWebEngine code for downloads."""
import re
import sys
import html
import os.path
import collections
import functools
import pathlib
import tempfile
import enum
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QObject, QModelIndex,
QTimer, QAbstractListModel, QUrl)
from qutebrowser.browser import pdfjs
from qutebrowser.api import cmdutils
from qutebrowser.config import config
from qutebrowser.utils import (usertypes, standarddir, utils, message, log,
qtutils, objreg)
from qutebrowser.qt import sip
class ModelRole(enum.IntEnum):
"""Custom download model roles."""
item = Qt.UserRole
# Remember the last used directory
last_used_directory = None
# All REFRESH_INTERVAL milliseconds, speeds will be recalculated and downloads
# redrawn.
_REFRESH_INTERVAL = 500
class UnsupportedAttribute:
"""Class which is used to create attributes which are not supported.
This is used for attributes like "fileobj" for downloads which are not
supported with QtWebengine.
"""
class UnsupportedOperationError(Exception):
"""Raised when an operation is not supported with the given backend."""
def download_dir():
"""Get the download directory to use."""
directory = config.val.downloads.location.directory
remember_dir = config.val.downloads.location.remember
if remember_dir and last_used_directory is not None:
ddir = last_used_directory
elif directory is None:
ddir = standarddir.download()
else:
ddir = directory
try:
os.makedirs(ddir, exist_ok=True)
except OSError as e:
message.error("Failed to create download directory: {}".format(e))
return ddir
def immediate_download_path(prompt_download_directory=None):
"""Try to get an immediate download path without asking the user.
If that's possible, we return a path immediately. If not, None is returned.
Args:
prompt_download_directory: If this is something else than None, it
will overwrite the
downloads.location.prompt setting.
"""
if prompt_download_directory is None:
prompt_download_directory = config.val.downloads.location.prompt
if not prompt_download_directory:
return download_dir()
return None
def _path_suggestion(filename):
"""Get the suggested file path.
Args:
filename: The filename to use if included in the suggestion.
"""
suggestion = config.val.downloads.location.suggestion
if suggestion == 'path':
# add trailing '/' if not present
return os.path.join(download_dir(), '')
elif suggestion == 'filename':
return filename
elif suggestion == 'both':
return os.path.join(download_dir(), filename)
else: # pragma: no cover
raise ValueError("Invalid suggestion value {}!".format(suggestion))
def create_full_filename(basename, filename):
"""Create a full filename based on the given basename and filename.
Args:
basename: The basename to use if filename is a directory.
filename: The path to a folder or file where you want to save.
Return:
The full absolute path, or None if filename creation was not possible.
"""
basename = utils.sanitize_filename(basename)
# Filename can be a full path so don't use sanitize_filename on it.
# Remove chars which can't be encoded in the filename encoding.
# See https://github.com/qutebrowser/qutebrowser/issues/427
encoding = sys.getfilesystemencoding()
filename = utils.force_encoding(filename, encoding)
if os.path.isabs(filename) and (os.path.isdir(filename) or
filename.endswith(os.sep)):
# We got an absolute directory from the user, so we save it under
# the default filename in that directory.
return os.path.join(filename, basename)
elif os.path.isabs(filename):
# We got an absolute filename from the user, so we save it under
# that filename.
return filename
return None
def get_filename_question(*, suggested_filename, url, parent=None):
"""Get a Question object for a download-path.
Args:
suggested_filename: The "default"-name that is pre-entered as path.
url: The URL the download originated from.
parent: The parent of the question (a QObject).
"""
suggested_filename = utils.sanitize_filename(suggested_filename)
q = usertypes.Question(parent)
q.title = "Save file to:"
q.text = "Please enter a location for <b>{}</b>".format(
html.escape(url.toDisplayString()))
q.url = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
q.mode = usertypes.PromptMode.download
q.completed.connect(q.deleteLater)
q.default = _path_suggestion(suggested_filename)
return q
def transform_path(path):
r"""Do platform-specific transformations, like changing E: to E:\.
Returns None if the path is invalid on the current platform.
"""
if not utils.is_windows:
return path
path = utils.expand_windows_drive(path)
# Drive dependent working directories are not supported, e.g.
# E:filename is invalid
if re.search(r'^[A-Z]:[^\\]', path, re.IGNORECASE):
return None
# Paths like COM1, ...
# See https://github.com/qutebrowser/qutebrowser/issues/82
if pathlib.Path(path).is_reserved():
return None
return path
def suggested_fn_from_title(url_path, title=None):
"""Suggest a filename depending on the URL extension and page title.
Args:
url_path: a string with the URL path
title: the page title string
Return:
The download filename based on the title, or None if the extension is
not found in the whitelist (or if there is no page title).
"""
ext_whitelist = [".html", ".htm", ".php", ""]
_, ext = os.path.splitext(url_path)
if ext.lower() in ext_whitelist and title:
suggested_fn = utils.sanitize_filename(title)
if not suggested_fn.lower().endswith((".html", ".htm")):
suggested_fn += ".html"
else:
suggested_fn = None
return suggested_fn
class NoFilenameError(Exception):
"""Raised when we can't find out a filename in DownloadTarget."""
# Where a download should be saved
class _DownloadTarget:
"""Abstract base class for different download targets."""
def suggested_filename(self):
"""Get the suggested filename for this download target."""
raise NotImplementedError
class FileDownloadTarget(_DownloadTarget):
"""Save the download to the given file.
Attributes:
filename: Filename where the download should be saved.
force_overwrite: Whether to overwrite the target without
prompting the user.
"""
def __init__(self, filename, force_overwrite=False):
self.filename = filename
self.force_overwrite = force_overwrite
def suggested_filename(self):
return os.path.basename(self.filename)
def __str__(self):
return self.filename
class FileObjDownloadTarget(_DownloadTarget):
"""Save the download to the given file-like object.
Attributes:
fileobj: File-like object where the download should be written to.
"""
def __init__(self, fileobj):
self.fileobj = fileobj
def suggested_filename(self):
try:
return self.fileobj.name
except AttributeError:
raise NoFilenameError
def __str__(self):
try:
return 'file object at {}'.format(self.fileobj.name)
except AttributeError:
return 'anonymous file object'
class OpenFileDownloadTarget(_DownloadTarget):
"""Save the download in a temp dir and directly open it.
Attributes:
cmdline: The command to use as string. A `{}` is expanded to the
filename. None means to use the system's default application.
If no `{}` is found, the filename is appended to the cmdline.
"""
def __init__(self, cmdline=None):
self.cmdline = cmdline
def suggested_filename(self):
raise NoFilenameError
def __str__(self):
return 'temporary file'
class PDFJSDownloadTarget(_DownloadTarget):
"""Open the download via PDF.js."""
def suggested_filename(self):
raise NoFilenameError
def __str__(self):
return 'temporary PDF.js file'
class DownloadItemStats(QObject):
"""Statistics (bytes done, total bytes, time, etc.) about a download.
Class attributes:
SPEED_AVG_WINDOW: How many seconds of speed data to average to
estimate the remaining time.
Attributes:
done: How many bytes there are already downloaded.
total: The total count of bytes. None if the total is unknown.
speed: The current download speed, in bytes per second.
_speed_avg: A rolling average of speeds.
_last_done: The count of bytes which where downloaded when calculating
the speed the last time.
"""
SPEED_AVG_WINDOW = 30
def __init__(self, parent=None):
super().__init__(parent)
self.total = None
self.done = 0
self.speed = 0
self._last_done = 0
samples = int(self.SPEED_AVG_WINDOW * (1000 / _REFRESH_INTERVAL))
self._speed_avg = collections.deque(maxlen=samples)
def update_speed(self):
"""Recalculate the current download speed.
The caller needs to guarantee this is called all _REFRESH_INTERVAL ms.
"""
if self.done is None:
# this can happen for very fast downloads, e.g. when actually
# opening a file
return
delta = self.done - self._last_done
self.speed = delta * 1000 / _REFRESH_INTERVAL
self._speed_avg.append(self.speed)
self._last_done = self.done
def finish(self):
"""Set the download stats as finished."""
self.done = self.total
def percentage(self):
"""The current download percentage, or None if unknown."""
if self.done == self.total:
return 100
elif self.total == 0 or self.total is None:
return None
else:
return 100 * self.done / self.total
def remaining_time(self):
"""The remaining download time in seconds, or None."""
if self.total is None or not self._speed_avg:
# No average yet or we don't know the total size.
return None
remaining_bytes = self.total - self.done
avg = sum(self._speed_avg) / len(self._speed_avg)
if avg == 0:
# Download stalled
return None
else:
return remaining_bytes / avg
@pyqtSlot('qint64', 'qint64')
def on_download_progress(self, bytes_done, bytes_total):
"""Update local variables when the download progress changed.
Args:
bytes_done: How many bytes are downloaded.
bytes_total: How many bytes there are to download in total.
"""
if bytes_total in [0, -1]: # QtWebEngine, QtWebKit
bytes_total = None
self.done = bytes_done
self.total = bytes_total
class AbstractDownloadItem(QObject):
"""Shared QtNetwork/QtWebEngine part of a download item.
Attributes:
done: Whether the download is finished.
stats: A DownloadItemStats object.
index: The index of the download in the view.
successful: Whether the download has completed successfully.
error_msg: The current error message, or None
fileobj: The file object to download the file to.
raw_headers: The headers sent by the server.
_filename: The filename of the download.
_dead: Whether the Download has _die()'d.
Signals:
data_changed: The downloads metadata changed.
finished: The download was finished.
cancelled: The download was cancelled.
error: An error with the download occurred.
arg: The error message as string.
remove_requested: Emitted when the removal of this download was
requested.
pdfjs_requested: Emitted when PDF.js should be opened with the given
filename.
"""
data_changed = pyqtSignal()
finished = pyqtSignal()
error = pyqtSignal(str)
cancelled = pyqtSignal()
remove_requested = pyqtSignal()
pdfjs_requested = pyqtSignal(str)
def __init__(self, parent=None):
super().__init__(parent)
self.done = False
self.stats = DownloadItemStats(self)
self.index = 0
self.error_msg = None
self.basename = '???'
self.successful = False
self.fileobj = UnsupportedAttribute()
self.raw_headers = UnsupportedAttribute()
self._filename = None
self._dead = False
def __repr__(self):
return utils.get_repr(self, basename=self.basename)
def __str__(self):
"""Get the download as a string.
Example: foo.pdf [699.2kB/s|0.34|16%|4.253/25.124]
"""
speed = utils.format_size(self.stats.speed, suffix='B/s')
down = utils.format_size(self.stats.done, suffix='B')
perc = self.stats.percentage()
remaining = self.stats.remaining_time()
if self.error_msg is None:
errmsg = ""
else:
errmsg = " - {}".format(self.error_msg)
if all(e is None for e in [perc, remaining, self.stats.total]):
return ('{index}: {name} [{speed:>10}|{down}]{errmsg}'.format(
index=self.index, name=self.basename, speed=speed,
down=down, errmsg=errmsg))
perc = round(perc)
if remaining is None:
remaining = '?'
else:
remaining = utils.format_seconds(remaining)
total = utils.format_size(self.stats.total, suffix='B')
if self.done:
return ('{index}: {name} [{perc:>2}%|{total}]{errmsg}'.format(
index=self.index, name=self.basename, perc=perc,
total=total, errmsg=errmsg))
else:
return ('{index}: {name} [{speed:>10}|{remaining:>5}|{perc:>2}%|'
'{down}/{total}]{errmsg}'.format(
index=self.index, name=self.basename, speed=speed,
remaining=remaining, perc=perc, down=down,
total=total, errmsg=errmsg))
def _do_die(self):
"""Do cleanup steps after a download has died."""
raise NotImplementedError
def _die(self, msg):
"""Abort the download and emit an error."""
assert not self.successful
# Prevent actions if calling _die() twice.
#
# For QtWebKit, this might happen if the error handler correctly
# connects, and the error occurs in _init_reply between
# reply.error.connect and the reply.error() check. In this case, the
# connected error handlers will be called twice, once via the direct
# error.emit() and once here in _die(). The stacks look like this then:
#
# <networkmanager error.emit> -> on_reply_error -> _die ->
# self.error.emit()
#
# and
#
# [_init_reply -> <single shot timer> ->] <lambda in _init_reply> ->
# self.error.emit()
#
# which may lead to duplicate error messages (and failing tests)
if self._dead:
return
self._dead = True
self._do_die()
self.error_msg = msg
self.stats.finish()
self.error.emit(msg)
self.done = True
self.data_changed.emit()
def get_status_color(self, position):
"""Choose an appropriate color for presenting the download's status.
Args:
position: The color type requested, can be 'fg' or 'bg'.
"""
assert position in ["fg", "bg"]
# pylint: disable=bad-config-option
start = getattr(config.val.colors.downloads.start, position)
stop = getattr(config.val.colors.downloads.stop, position)
system = getattr(config.val.colors.downloads.system, position)
error = getattr(config.val.colors.downloads.error, position)
# pylint: enable=bad-config-option
if self.error_msg is not None:
assert not self.successful
return error
elif self.stats.percentage() is None:
return start
else:
return utils.interpolate_color(start, stop,
self.stats.percentage(), system)
def _do_cancel(self):
"""Actual cancel implementation."""
raise NotImplementedError
@pyqtSlot()
def cancel(self, *, remove_data=True):
"""Cancel the download.
Args:
remove_data: Whether to remove the downloaded data.
"""
self._do_cancel()
log.downloads.debug("cancelled")
if remove_data:
self.delete()
self.done = True
self.finished.emit()
self.data_changed.emit()
@pyqtSlot()
def remove(self):
"""Remove the download from the model."""
self.remove_requested.emit()
def delete(self):
"""Delete the downloaded file."""
try:
if self._filename is not None and os.path.exists(self._filename):
os.remove(self._filename)
log.downloads.debug("Deleted {}".format(self._filename))
else:
log.downloads.debug("Not deleting {}".format(self._filename))
except OSError:
log.downloads.exception("Failed to remove partial file")
@pyqtSlot()
def retry(self):
"""Retry a failed download."""
raise NotImplementedError
@pyqtSlot()
def try_retry(self):
"""Try to retry a download and show an error if it's unsupported."""
try:
self.retry()
except UnsupportedOperationError as e:
message.error(str(e))
def _get_open_filename(self):
"""Get the filename to open a download.
Returns None if no suitable filename was found.
"""
raise NotImplementedError
@pyqtSlot()
def open_file(self, cmdline=None):
"""Open the downloaded file.
Args:
cmdline: The command to use as string. A `{}` is expanded to the
filename. None means to use the system's default
application or `downloads.open_dispatcher` if set. If no
`{}` is found, the filename is appended to the cmdline.
"""
assert self.successful
filename = self._get_open_filename()
if filename is None: # pragma: no cover
log.downloads.error("No filename to open the download!")
return
# By using a singleshot timer, we ensure that we return fast. This
# is important on systems where process creation takes long, as
# otherwise the prompt might hang around and cause bugs
# (see issue #2296)
QTimer.singleShot(0, lambda: utils.open_file(filename, cmdline))
def _ensure_can_set_filename(self, filename):
"""Make sure we can still set a filename."""
raise NotImplementedError
def _after_set_filename(self):
"""Finish initialization based on self._filename."""
raise NotImplementedError
def _ask_confirm_question(self, title, msg):
"""Ask a confirmation question for the download."""
raise NotImplementedError
def _ask_create_parent_question(self, title, msg,
force_overwrite, remember_directory):
"""Ask a confirmation question for the parent directory."""
raise NotImplementedError
def _set_fileobj(self, fileobj, *, autoclose=True):
"""Set a file object to save the download to.
Not supported by QtWebEngine.
Args:
fileobj: The file object to download to.
autoclose: Close the file object automatically when it's done.
"""
raise NotImplementedError
def _set_tempfile(self, fileobj):
"""Set a temporary file when opening the download."""
raise NotImplementedError
def _set_filename(self, filename, *, force_overwrite=False,
remember_directory=True):
"""Set the filename to save the download to.
Args:
filename: The full filename to save the download to.
None: special value to stop the download.
force_overwrite: Force overwriting existing files.
remember_directory: If True, remember the directory for future
downloads.
"""
filename = os.path.expanduser(filename)
self._ensure_can_set_filename(filename)
self._filename = create_full_filename(self.basename, filename)
if self._filename is None:
# We only got a filename (without directory) or a relative path
# from the user, so we append that to the default directory and
# try again.
self._filename = create_full_filename(
self.basename, os.path.join(download_dir(), filename))
# At this point, we have a misconfigured XDG_DOWNLOAD_DIR, as
# download_dir() + filename is still no absolute path.
# The config value is checked for "absoluteness", but
# ~/.config/user-dirs.dirs may be misconfigured and a non-absolute path
# may be set for XDG_DOWNLOAD_DIR
if self._filename is None:
message.error(
"XDG_DOWNLOAD_DIR points to a relative path - please check"
" your ~/.config/user-dirs.dirs. The download is saved in"
" your home directory.",
)
# fall back to $HOME as download_dir
self._filename = create_full_filename(self.basename,
os.path.expanduser('~'))
dirname = os.path.dirname(self._filename)
if not os.path.exists(dirname):
txt = ("<b>{}</b> does not exist. Create it?".
format(html.escape(
os.path.join(dirname, ""))))
self._ask_create_parent_question("Create directory?", txt,
force_overwrite,
remember_directory)
else:
self._after_create_parent_question(force_overwrite,
remember_directory)
def _after_create_parent_question(self,
force_overwrite, remember_directory):
"""After asking about parent directory.
Args:
force_overwrite: Force overwriting existing files.
remember_directory: If True, remember the directory for future
downloads.
"""
global last_used_directory
try:
os.makedirs(os.path.dirname(self._filename), exist_ok=True)
except OSError as e:
self._die(e.strerror)
self.basename = os.path.basename(self._filename)
if remember_directory:
last_used_directory = os.path.dirname(self._filename)
log.downloads.debug("Setting filename to {}".format(self._filename))
if force_overwrite:
self._after_set_filename()
elif os.path.isfile(self._filename):
# The file already exists, so ask the user if it should be
# overwritten.
txt = "<b>{}</b> already exists. Overwrite?".format(
html.escape(self._filename))
self._ask_confirm_question("Overwrite existing file?", txt)
# FIFO, device node, etc. Make sure we want to do this
elif (os.path.exists(self._filename) and
not os.path.isdir(self._filename)):
txt = ("<b>{}</b> already exists and is a special file. Write to "
"it anyways?".format(html.escape(self._filename)))
self._ask_confirm_question("Overwrite special file?", txt)
else:
self._after_set_filename()
def _open_if_successful(self, cmdline):
"""Open the downloaded file, but only if it was successful.
Args:
cmdline: Passed to DownloadItem.open_file().
"""
if not self.successful:
log.downloads.debug("{} finished but not successful, not opening!"
.format(self))
return
self.open_file(cmdline)
def _pdfjs_if_successful(self):
"""Open the file via PDF.js if downloading was successful."""
if not self.successful:
log.downloads.debug("{} finished but not successful, not opening!"
.format(self))
return
filename = self._get_open_filename()
if filename is None: # pragma: no cover
log.downloads.error("No filename to open the download!")
return
self.pdfjs_requested.emit(os.path.basename(filename))
def set_target(self, target):
"""Set the target for a given download.
Args:
target: The DownloadTarget for this download.
"""
if isinstance(target, FileObjDownloadTarget):
self._set_fileobj(target.fileobj, autoclose=False)
elif isinstance(target, FileDownloadTarget):
self._set_filename(
target.filename, force_overwrite=target.force_overwrite)
elif isinstance(target, (OpenFileDownloadTarget, PDFJSDownloadTarget)):
try:
fobj = temp_download_manager.get_tmpfile(self.basename)
except OSError as exc:
msg = "Download error: {}".format(exc)
message.error(msg)
self.cancel()
return
if isinstance(target, OpenFileDownloadTarget):
self.finished.connect(functools.partial(
self._open_if_successful, target.cmdline))
elif isinstance(target, PDFJSDownloadTarget):
self.finished.connect(self._pdfjs_if_successful)
else:
raise utils.Unreachable
self._set_tempfile(fobj)
else: # pragma: no cover
raise ValueError("Unsupported download target: {}".format(target))
class AbstractDownloadManager(QObject):
"""Backend-independent download manager code.
Attributes:
downloads: A list of active DownloadItems.
_networkmanager: A NetworkManager for generic downloads.
Signals:
begin_remove_row: Emitted before downloads are removed.
end_remove_row: Emitted after downloads are removed.
begin_insert_row: Emitted before downloads are inserted.
end_insert_row: Emitted after downloads are inserted.
data_changed: Emitted when the data of the model changed.
The argument is the index of the changed download
"""
begin_remove_row = pyqtSignal(int)
end_remove_row = pyqtSignal()
begin_insert_row = pyqtSignal(int)
end_insert_row = pyqtSignal()
data_changed = pyqtSignal(int)
def __init__(self, parent=None):
super().__init__(parent)
self.downloads = []
self._update_timer = usertypes.Timer(self, 'download-update')
self._update_timer.timeout.connect(self._update_gui)
self._update_timer.setInterval(_REFRESH_INTERVAL)
def __repr__(self):
return utils.get_repr(self, downloads=len(self.downloads))
@pyqtSlot()
def _update_gui(self):
"""Periodical GUI update of all items."""
assert self.downloads
for dl in self.downloads:
dl.stats.update_speed()
self.data_changed.emit(-1)
@pyqtSlot(str)
def _on_pdfjs_requested(self, filename):
"""Open PDF.js when a download requests it."""
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window='last-focused')
tabbed_browser.tabopen(pdfjs.get_main_url(filename), background=False)
def _init_item(self, download, auto_remove, suggested_filename):
"""Initialize a newly created DownloadItem."""
download.cancelled.connect(download.remove)
download.remove_requested.connect(functools.partial(
self._remove_item, download))
delay = config.val.downloads.remove_finished
if delay > -1:
download.finished.connect(
lambda: QTimer.singleShot(delay, download.remove))
elif auto_remove:
download.finished.connect(download.remove)
download.data_changed.connect(
functools.partial(self._on_data_changed, download))
download.error.connect(self._on_error)
download.pdfjs_requested.connect(self._on_pdfjs_requested)
download.basename = suggested_filename
idx = len(self.downloads)
download.index = idx + 1 # "Human readable" index
self.begin_insert_row.emit(idx)
self.downloads.append(download)
self.end_insert_row.emit()
if not self._update_timer.isActive():
self._update_timer.start()
@pyqtSlot(AbstractDownloadItem)
def _on_data_changed(self, download):
"""Emit data_changed signal when download data changed."""
try:
idx = self.downloads.index(download)
except ValueError:
# download has been deleted in the meantime
return
self.data_changed.emit(idx)
@pyqtSlot(str)
def _on_error(self, msg):
"""Display error message on download errors."""
message.error("Download error: {}".format(msg))
@pyqtSlot(AbstractDownloadItem)
def _remove_item(self, download):
"""Remove a given download."""
if sip.isdeleted(self):
# https://github.com/qutebrowser/qutebrowser/issues/1242
return
try:
idx = self.downloads.index(download)
except ValueError:
# already removed
return
self.begin_remove_row.emit(idx)
del self.downloads[idx]
self.end_remove_row.emit()
download.deleteLater()
self._update_indexes()
if not self.downloads:
self._update_timer.stop()
log.downloads.debug("Removed download {}".format(download))
def _update_indexes(self):
"""Update indexes of all DownloadItems."""
for i, d in enumerate(self.downloads, 1):
d.index = i
self.data_changed.emit(-1)
def _init_filename_question(self, question, download):
"""Set up an existing filename question with a download."""
question.answered.connect(download.set_target)
question.cancelled.connect(download.cancel)
download.cancelled.connect(question.abort)
download.error.connect(question.abort)
class DownloadModel(QAbstractListModel):
"""A list model showing downloads."""
def __init__(self, qtnetwork_manager, webengine_manager=None, parent=None):
super().__init__(parent)
self._qtnetwork_manager = qtnetwork_manager
self._webengine_manager = webengine_manager
qtnetwork_manager.data_changed.connect(
functools.partial(self._on_data_changed, webengine=False))
qtnetwork_manager.begin_insert_row.connect(
functools.partial(self._on_begin_insert_row, webengine=False))
qtnetwork_manager.begin_remove_row.connect(
functools.partial(self._on_begin_remove_row, webengine=False))
qtnetwork_manager.end_insert_row.connect(self.endInsertRows)
qtnetwork_manager.end_remove_row.connect(self.endRemoveRows)
if webengine_manager is not None:
webengine_manager.data_changed.connect(
functools.partial(self._on_data_changed, webengine=True))
webengine_manager.begin_insert_row.connect(
functools.partial(self._on_begin_insert_row, webengine=True))
webengine_manager.begin_remove_row.connect(
functools.partial(self._on_begin_remove_row, webengine=True))
webengine_manager.end_insert_row.connect(self.endInsertRows)
webengine_manager.end_remove_row.connect(self.endRemoveRows)
def _all_downloads(self):
"""Combine downloads from both downloaders."""
if self._webengine_manager is None:
return self._qtnetwork_manager.downloads[:]
else:
return (self._qtnetwork_manager.downloads +
self._webengine_manager.downloads)
def __len__(self):
return len(self._all_downloads())
def __iter__(self):
return iter(self._all_downloads())
def __getitem__(self, idx):
return self._all_downloads()[idx]
def _on_begin_insert_row(self, idx, webengine=False):
log.downloads.debug("_on_begin_insert_row with idx {}, "
"webengine {}".format(idx, webengine))
if idx == -1:
self.beginInsertRows(QModelIndex(), 0, -1)
return
assert idx >= 0, idx
if webengine:
idx += len(self._qtnetwork_manager.downloads)
self.beginInsertRows(QModelIndex(), idx, idx)
def _on_begin_remove_row(self, idx, webengine=False):
log.downloads.debug("_on_begin_remove_row with idx {}, "
"webengine {}".format(idx, webengine))
if idx == -1:
self.beginRemoveRows(QModelIndex(), 0, -1)
return
assert idx >= 0, idx
if webengine:
idx += len(self._qtnetwork_manager.downloads)
self.beginRemoveRows(QModelIndex(), idx, idx)
def _on_data_changed(self, idx, *, webengine):
"""Called when a downloader's data changed.
Args:
start: The first changed index as int.
end: The last changed index as int, or -1 for all indices.
webengine: If given, the QtNetwork download length is added to the
index.
"""
if idx == -1:
start_index = self.index(0, 0)
end_index = self.last_index()
else:
if webengine:
idx += len(self._qtnetwork_manager.downloads)
start_index = self.index(idx, 0)
end_index = self.index(idx, 0)
qtutils.ensure_valid(start_index)
qtutils.ensure_valid(end_index)
self.dataChanged.emit(start_index, end_index)
def _raise_no_download(self, count):
"""Raise an exception that the download doesn't exist.
Args:
count: The index of the download
"""
if not count:
raise cmdutils.CommandError("There's no download!")
raise cmdutils.CommandError("There's no download {}!".format(count))
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', value=cmdutils.Value.count)
def download_cancel(self, all_=False, count=0):
"""Cancel the last/[count]th download.
Args:
all_: Cancel all running downloads
count: The index of the download to cancel.
"""
downloads = self._all_downloads()
if all_:
for download in downloads:
if not download.done:
download.cancel()
else:
try:
download = downloads[count - 1]
except IndexError:
self._raise_no_download(count)
if download.done:
if not count:
count = len(self)
raise cmdutils.CommandError("Download {} is already done!"
.format(count))
download.cancel()
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', value=cmdutils.Value.count)
def download_delete(self, count=0):
"""Delete the last/[count]th download from disk.
Args:
count: The index of the download to delete.
"""
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.successful:
if not count:
count = len(self)
raise cmdutils.CommandError("Download {} is not done!"
.format(count))
download.delete()
download.remove()
log.downloads.debug("deleted download {}".format(download))
@cmdutils.register(instance='download-model', scope='window', maxsplit=0)
@cmdutils.argument('count', value=cmdutils.Value.count)
def download_open(self, cmdline: str = None, count: int = 0) -> None:
"""Open the last/[count]th download.
If no specific command is given, this will use the system's default
application to open the file.
Args:
cmdline: The command which should be used to open the file. A `{}`
is expanded to the temporary file name. If no `{}` is
present, the filename is automatically appended to the
cmdline.
count: The index of the download to open.
"""
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.successful:
if not count:
count = len(self)
raise cmdutils.CommandError("Download {} is not done!"
.format(count))
download.open_file(cmdline)
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', value=cmdutils.Value.count)
def download_retry(self, count=0):
"""Retry the first failed/[count]th download.
Args:
count: The index of the download to retry.
"""
if count:
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if download.successful or not download.done:
raise cmdutils.CommandError("Download {} did not fail!"
.format(count))
else:
to_retry = [d for d in self if d.done and not d.successful]
if not to_retry:
raise cmdutils.CommandError("No failed downloads!")
download = to_retry[0]
download.try_retry()
def can_clear(self):
"""Check if there are finished downloads to clear."""
return any(download.done for download in self)
@cmdutils.register(instance='download-model', scope='window')
def download_clear(self):
"""Remove all finished downloads from the list."""
for download in self:
if download.done:
download.remove()
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', value=cmdutils.Value.count)
def download_remove(self, all_=False, count=0):
"""Remove the last/[count]th download from the list.
Args:
all_: Remove all finished downloads.
count: The index of the download to remove.
"""
if all_:
self.download_clear()
else:
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.done:
if not count:
count = len(self)
raise cmdutils.CommandError("Download {} is not done!"
.format(count))
download.remove()
def running_downloads(self):
"""Return the amount of still running downloads.
Return:
The number of unfinished downloads.
"""
return sum(1 for download in self if not download.done)
def last_index(self):
"""Get the last index in the model.
Return:
A (possibly invalid) QModelIndex.
"""
idx = self.index(self.rowCount() - 1)
return idx
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Simple constant header."""
if (section == 0 and orientation == Qt.Horizontal and
role == Qt.DisplayRole):
return "Downloads"
else:
return ""
def data(self, index, role):
"""Download data from DownloadManager."""
if not index.isValid():
return None
if index.parent().isValid() or index.column() != 0:
return None
item = self[index.row()]
if role == Qt.DisplayRole:
data = str(item)
elif role == Qt.ForegroundRole:
data = item.get_status_color('fg')
elif role == Qt.BackgroundRole:
data = item.get_status_color('bg')
elif role == ModelRole.item:
data = item
elif role == Qt.ToolTipRole:
if item.error_msg is None:
data = None
else:
return item.error_msg
else:
data = None
return data
def flags(self, index):
"""Override flags so items aren't selectable.
The default would be Qt.ItemIsEnabled | Qt.ItemIsSelectable.
"""
if not index.isValid():
return Qt.ItemFlags()
return Qt.ItemIsEnabled | Qt.ItemNeverHasChildren
def rowCount(self, parent=QModelIndex()):
"""Get count of active downloads."""
if parent.isValid():
# We don't have children
return 0
return len(self)
class TempDownloadManager:
"""Manager to handle temporary download files.
The downloads are downloaded to a temporary location and then openened with
the system standard application. The temporary files are deleted when
qutebrowser is shutdown.
Attributes:
files: A list of NamedTemporaryFiles of downloaded items.
"""
def __init__(self):
self.files = []
self._tmpdir = None
def cleanup(self):
"""Clean up any temporary files."""
if self._tmpdir is not None:
try:
self._tmpdir.cleanup()
except OSError:
log.misc.exception("Failed to clean up temporary download "
"directory")
self._tmpdir = None
def get_tmpdir(self):
"""Return the temporary directory that is used for downloads.
The directory is created lazily on first access.
Return:
The tempfile.TemporaryDirectory that is used.
"""
if self._tmpdir is None:
self._tmpdir = tempfile.TemporaryDirectory(
prefix='qutebrowser-downloads-')
return self._tmpdir
def get_tmpfile(self, suggested_name):
"""Return a temporary file in the temporary downloads directory.
The files are kept as long as qutebrowser is running and automatically
cleaned up at program exit.
Args:
suggested_name: str of the "suggested"/original filename. Used as a
suffix, so any file extenions are preserved.
Return:
A tempfile.NamedTemporaryFile that should be used to save the file.
"""
tmpdir = self.get_tmpdir()
suggested_name = utils.sanitize_filename(suggested_name)
# Make sure that the filename is not too long
suggested_name = utils.elide_filename(suggested_name, 50)
fobj = tempfile.NamedTemporaryFile(dir=tmpdir.name, delete=False,
suffix='_' + suggested_name)
self.files.append(fobj)
return fobj
temp_download_manager = TempDownloadManager()
| 1 | 23,349 | I'm not sure if this is standard practice, but I would prefer to leave this initialization here, if only to show that such a variable exists in this file (instead of inferring it from the globals). It can also cause weird issues if, for example, `init` isn't run before other methods. | qutebrowser-qutebrowser | py |
@@ -353,6 +353,7 @@ RDKit::SparseIntVect<boost::uint32_t> *MorganFingerprintHelper(
res = RDKit::MorganFingerprints::getFingerprint(
mol, static_cast<unsigned int>(radius), invars, froms, useChirality,
useBondTypes, useCounts, false, bitInfoMap);
+ throw "Value of nbits is Negative"
} else {
res = RDKit::MorganFingerprints::getHashedFingerprint(
mol, static_cast<unsigned int>(radius), | 1 | //
// Copyright (C) 2007-2017 Greg Landrum
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include <RDBoost/Wrap.h>
#include <GraphMol/Atom.h>
#include <GraphMol/GraphMol.h>
#include <RDGeneral/BoostStartInclude.h>
#include <boost/foreach.hpp>
#include <RDGeneral/BoostEndInclude.h>
#include <GraphMol/Descriptors/MolDescriptors.h>
#include <GraphMol/Fingerprints/AtomPairs.h>
#include <GraphMol/Fingerprints/MorganFingerprints.h>
#include <GraphMol/Fingerprints/MACCS.h>
#include <DataStructs/BitVects.h>
#include <GraphMol/Descriptors/USRDescriptor.h>
#ifdef RDK_BUILD_DESCRIPTORS3D
#include <GraphMol/Descriptors/MolDescriptors3D.h>
#endif
#include <vector>
namespace python = boost::python;
namespace {
std::vector<unsigned int> atomPairTypes(
RDKit::AtomPairs::atomNumberTypes,
RDKit::AtomPairs::atomNumberTypes +
sizeof(RDKit::AtomPairs::atomNumberTypes) / sizeof(unsigned int));
python::tuple computeASAContribs(const RDKit::ROMol &mol, bool includeHs = true,
bool force = false) {
std::vector<double> contribs(mol.getNumAtoms());
double hContrib = 0.0;
RDKit::Descriptors::getLabuteAtomContribs(mol, contribs, hContrib, includeHs,
force);
python::tuple pycontribs(contribs);
return python::make_tuple(contribs, hContrib);
}
python::tuple computeTPSAContribs(const RDKit::ROMol &mol, bool force = false) {
std::vector<double> contribs(mol.getNumAtoms());
RDKit::Descriptors::getTPSAAtomContribs(mol, contribs, force);
python::tuple pycontribs(contribs);
return pycontribs;
}
python::list computeCrippenContribs(
const RDKit::ROMol &mol, bool force = false,
python::list atomTypes = python::list(),
python::list atomTypeLabels = python::list()) {
std::vector<unsigned int> *tAtomTypes = 0;
std::vector<std::string> *tAtomTypeLabels = 0;
if (python::extract<unsigned int>(atomTypes.attr("__len__")()) != 0) {
if (python::extract<unsigned int>(atomTypes.attr("__len__")()) !=
mol.getNumAtoms()) {
throw_value_error(
"if atomTypes vector is provided, it must be as long as the number "
"of atoms");
} else {
tAtomTypes = new std::vector<unsigned int>(mol.getNumAtoms(), 0);
}
}
if (python::extract<unsigned int>(atomTypeLabels.attr("__len__")()) != 0) {
if (python::extract<unsigned int>(atomTypeLabels.attr("__len__")()) !=
mol.getNumAtoms()) {
throw_value_error(
"if atomTypeLabels vector is provided, it must be as long as the "
"number of atoms");
} else {
tAtomTypeLabels = new std::vector<std::string>(mol.getNumAtoms(), "");
}
}
std::vector<double> logpContribs(mol.getNumAtoms());
std::vector<double> mrContribs(mol.getNumAtoms());
RDKit::Descriptors::getCrippenAtomContribs(
mol, logpContribs, mrContribs, force, tAtomTypes, tAtomTypeLabels);
python::list pycontribs;
for (unsigned int i = 0; i < mol.getNumAtoms(); ++i) {
pycontribs.append(python::make_tuple(logpContribs[i], mrContribs[i]));
}
if (tAtomTypes) {
for (unsigned int i = 0; i < mol.getNumAtoms(); ++i) {
atomTypes[i] = (*tAtomTypes)[i];
}
delete tAtomTypes;
}
if (tAtomTypeLabels) {
for (unsigned int i = 0; i < mol.getNumAtoms(); ++i) {
atomTypeLabels[i] = (*tAtomTypeLabels)[i];
}
delete tAtomTypeLabels;
}
return pycontribs;
}
python::tuple calcCrippenDescriptors(const RDKit::ROMol &mol,
bool includeHs = true,
bool force = false) {
double logp, mr;
RDKit::Descriptors::calcCrippenDescriptors(mol, logp, mr, includeHs, force);
return python::make_tuple(logp, mr);
}
#ifdef RDK_BUILD_DESCRIPTORS3D
python::list calcWHIMs(const RDKit::ROMol &mol, int confId, double thresh) {
std::vector<double> res;
RDKit::Descriptors::WHIM(mol, res, confId, thresh);
python::list pyres;
BOOST_FOREACH (double iv, res) { pyres.append(iv); }
return pyres;
}
python::list calcGETAWAYs(const RDKit::ROMol &mol, int confId,
double precision) {
std::vector<double> res;
RDKit::Descriptors::GETAWAY(mol, res, confId, precision);
python::list pyres;
BOOST_FOREACH (double iv, res) { pyres.append(iv); }
return pyres;
}
python::list calcRDFs(const RDKit::ROMol &mol, int confId) {
std::vector<double> res;
RDKit::Descriptors::RDF(mol, res, confId);
python::list pyres;
BOOST_FOREACH (double iv, res) { pyres.append(iv); }
return pyres;
}
python::list calcMORSEs(const RDKit::ROMol &mol, int confId) {
std::vector<double> res;
RDKit::Descriptors::MORSE(mol, res, confId);
python::list pyres;
BOOST_FOREACH (double iv, res) { pyres.append(iv); }
return pyres;
}
python::list calcAUTOCORR3Ds(const RDKit::ROMol &mol, int confId) {
std::vector<double> res;
RDKit::Descriptors::AUTOCORR3D(mol, res, confId);
python::list pyres;
BOOST_FOREACH (double iv, res) { pyres.append(iv); }
return pyres;
}
python::list calcAUTOCORR2Ds(const RDKit::ROMol &mol) {
std::vector<double> res;
RDKit::Descriptors::AUTOCORR2D(mol, res);
python::list pyres;
BOOST_FOREACH (double iv, res) { pyres.append(iv); }
return pyres;
}
#endif
RDKit::SparseIntVect<boost::int32_t> *GetAtomPairFingerprint(
const RDKit::ROMol &mol, unsigned int minLength, unsigned int maxLength,
python::object fromAtoms, python::object ignoreAtoms,
python::object atomInvariants, bool includeChirality, bool use2D,
int confId) {
rdk_auto_ptr<std::vector<boost::uint32_t> > fvect =
pythonObjectToVect(fromAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > ivect =
pythonObjectToVect(ignoreAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > invvect = pythonObjectToVect(
atomInvariants,
static_cast<unsigned int>(1 << RDKit::AtomPairs::codeSize));
RDKit::SparseIntVect<boost::int32_t> *res;
res = RDKit::AtomPairs::getAtomPairFingerprint(
mol, minLength, maxLength, fvect.get(), ivect.get(), invvect.get(),
includeChirality, use2D, confId);
return res;
}
RDKit::SparseIntVect<boost::int32_t> *GetHashedAtomPairFingerprint(
const RDKit::ROMol &mol, unsigned int nBits, unsigned int minLength,
unsigned int maxLength, python::object fromAtoms,
python::object ignoreAtoms, python::object atomInvariants,
bool includeChirality, bool use2D, int confId) {
rdk_auto_ptr<std::vector<boost::uint32_t> > fvect =
pythonObjectToVect(fromAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > ivect =
pythonObjectToVect(ignoreAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > invvect = pythonObjectToVect(
atomInvariants,
static_cast<unsigned int>(1 << RDKit::AtomPairs::codeSize));
RDKit::SparseIntVect<boost::int32_t> *res;
res = RDKit::AtomPairs::getHashedAtomPairFingerprint(
mol, nBits, minLength, maxLength, fvect.get(), ivect.get(), invvect.get(),
includeChirality, use2D, confId);
return res;
}
RDKit::SparseIntVect<boost::int64_t> *GetTopologicalTorsionFingerprint(
const RDKit::ROMol &mol, unsigned int targetSize, python::object fromAtoms,
python::object ignoreAtoms, python::object atomInvariants,
bool includeChirality) {
rdk_auto_ptr<std::vector<boost::uint32_t> > fvect =
pythonObjectToVect(fromAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > ivect =
pythonObjectToVect(ignoreAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > invvect = pythonObjectToVect(
atomInvariants,
static_cast<unsigned int>(1 << RDKit::AtomPairs::codeSize));
if (targetSize * RDKit::AtomPairs::codeSize > 64) {
std::ostringstream errout;
errout << "Maximum supported topological torsion path length is "
<< 64 / RDKit::AtomPairs::codeSize << std::endl;
throw_value_error(errout.str());
}
RDKit::SparseIntVect<boost::int64_t> *res;
res = RDKit::AtomPairs::getTopologicalTorsionFingerprint(
mol, targetSize, fvect.get(), ivect.get(), invvect.get(),
includeChirality);
return res;
}
RDKit::SparseIntVect<boost::int64_t> *GetHashedTopologicalTorsionFingerprint(
const RDKit::ROMol &mol, unsigned int nBits, unsigned int targetSize,
python::object fromAtoms, python::object ignoreAtoms,
python::object atomInvariants, bool includeChirality) {
rdk_auto_ptr<std::vector<boost::uint32_t> > fvect =
pythonObjectToVect(fromAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > ivect =
pythonObjectToVect(ignoreAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > invvect = pythonObjectToVect(
atomInvariants,
static_cast<unsigned int>(1 << RDKit::AtomPairs::codeSize));
RDKit::SparseIntVect<boost::int64_t> *res;
res = RDKit::AtomPairs::getHashedTopologicalTorsionFingerprint(
mol, nBits, targetSize, fvect.get(), ivect.get(), invvect.get(),
includeChirality);
return res;
}
ExplicitBitVect *GetHashedTopologicalTorsionFingerprintAsBitVect(
const RDKit::ROMol &mol, unsigned int nBits, unsigned int targetSize,
python::object fromAtoms, python::object ignoreAtoms,
python::object atomInvariants, unsigned int nBitsPerEntry,
bool includeChirality) {
rdk_auto_ptr<std::vector<boost::uint32_t> > fvect =
pythonObjectToVect(fromAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > ivect =
pythonObjectToVect(ignoreAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > invvect = pythonObjectToVect(
atomInvariants,
static_cast<unsigned int>(1 << RDKit::AtomPairs::codeSize));
ExplicitBitVect *res;
res = RDKit::AtomPairs::getHashedTopologicalTorsionFingerprintAsBitVect(
mol, nBits, targetSize, fvect.get(), ivect.get(), invvect.get(),
nBitsPerEntry, includeChirality);
return res;
}
ExplicitBitVect *GetHashedAtomPairFingerprintAsBitVect(
const RDKit::ROMol &mol, unsigned int nBits, unsigned int minLength,
unsigned int maxLength, python::object fromAtoms,
python::object ignoreAtoms, python::object atomInvariants,
unsigned int nBitsPerEntry, bool includeChirality, bool use2D, int confId) {
rdk_auto_ptr<std::vector<boost::uint32_t> > fvect =
pythonObjectToVect(fromAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > ivect =
pythonObjectToVect(ignoreAtoms, mol.getNumAtoms());
rdk_auto_ptr<std::vector<boost::uint32_t> > invvect = pythonObjectToVect(
atomInvariants,
static_cast<unsigned int>(1 << RDKit::AtomPairs::codeSize));
ExplicitBitVect *res;
res = RDKit::AtomPairs::getHashedAtomPairFingerprintAsBitVect(
mol, nBits, minLength, maxLength, fvect.get(), ivect.get(), invvect.get(),
nBitsPerEntry, includeChirality, use2D, confId);
return res;
}
namespace {
double kappaHelper(double (*fn)(const RDKit::ROMol &, std::vector<double> *),
const RDKit::ROMol &mol, python::object atomContribs) {
std::vector<double> *lContribs = 0;
if (atomContribs != python::object()) {
// make sure the optional argument actually was a list
python::list typecheck = python::extract<python::list>(atomContribs);
if (python::extract<unsigned int>(typecheck.attr("__len__")()) !=
mol.getNumAtoms()) {
throw_value_error("length of atomContribs list != number of atoms");
}
lContribs = new std::vector<double>(mol.getNumAtoms());
}
double res = fn(mol, lContribs);
if (lContribs) {
python::list acl = python::extract<python::list>(atomContribs);
for (unsigned int i = 0; i < mol.getNumAtoms(); ++i) {
acl[i] = (*lContribs)[i];
}
delete lContribs;
}
return res;
}
double hkAlphaHelper(const RDKit::ROMol &mol, python::object atomContribs) {
return kappaHelper(RDKit::Descriptors::calcHallKierAlpha, mol, atomContribs);
}
// Shared implementation behind GetMorganFingerprint and
// GetHashedMorganFingerprint.  nBits < 0 selects the unhashed (sparse)
// fingerprint; otherwise the result is hashed into nBits bits.  Optional
// python arguments: `invariants` (per-atom invariants, length must equal the
// atom count), `fromAtoms` (atom indices to root environments at), and
// `bitInfo` (a dict that is cleared and filled with bit -> tuple of
// (atomId, radius) pairs).  Returns a newly allocated SparseIntVect owned by
// the caller (exposed to python with manage_new_object).
// FIX: all temporaries now live on the stack; the previous heap allocations
// of invars/froms/bitInfoMap leaked whenever a python::extract or the
// fingerprinter threw.
RDKit::SparseIntVect<boost::uint32_t> *MorganFingerprintHelper(
    const RDKit::ROMol &mol, int radius, int nBits, python::object invariants,
    python::object fromAtoms, bool useChirality, bool useBondTypes,
    bool useFeatures, bool useCounts, python::object bitInfo) {
  std::vector<boost::uint32_t> invarStorage;
  std::vector<boost::uint32_t> *invars = 0;
  if (invariants) {
    unsigned int nInvar =
        python::extract<unsigned int>(invariants.attr("__len__")());
    if (nInvar) {
      if (nInvar != mol.getNumAtoms()) {
        throw_value_error("length of invariant vector != number of atoms");
      }
      invarStorage.resize(mol.getNumAtoms());
      for (unsigned int i = 0; i < mol.getNumAtoms(); ++i) {
        invarStorage[i] = python::extract<boost::uint32_t>(invariants[i]);
      }
      invars = &invarStorage;
    }
  } else if (useFeatures) {
    // no explicit invariants: fall back to FCFP-style feature invariants
    invarStorage.resize(mol.getNumAtoms());
    RDKit::MorganFingerprints::getFeatureInvariants(mol, invarStorage);
    invars = &invarStorage;
  }
  std::vector<boost::uint32_t> fromStorage;
  std::vector<boost::uint32_t> *froms = 0;
  if (fromAtoms) {
    unsigned int nFrom =
        python::extract<unsigned int>(fromAtoms.attr("__len__")());
    if (nFrom) {
      fromStorage.reserve(nFrom);
      for (unsigned int i = 0; i < nFrom; ++i) {
        fromStorage.push_back(python::extract<boost::uint32_t>(fromAtoms[i]));
      }
      froms = &fromStorage;
    }
  }
  RDKit::MorganFingerprints::BitInfoMap bitInfoStorage;
  RDKit::MorganFingerprints::BitInfoMap *bitInfoMap = 0;
  if (bitInfo != python::object()) {
    // make sure the optional argument actually was a dictionary
    python::dict typecheck = python::extract<python::dict>(bitInfo);
    bitInfoMap = &bitInfoStorage;
  }
  RDKit::SparseIntVect<boost::uint32_t> *res;
  if (nBits < 0) {
    res = RDKit::MorganFingerprints::getFingerprint(
        mol, static_cast<unsigned int>(radius), invars, froms, useChirality,
        useBondTypes, useCounts, false, bitInfoMap);
  } else {
    res = RDKit::MorganFingerprints::getHashedFingerprint(
        mol, static_cast<unsigned int>(radius),
        static_cast<unsigned int>(nBits), invars, froms, useChirality,
        useBondTypes, false, bitInfoMap);
  }
  if (bitInfoMap) {
    // mirror the C++ bit-info map into the caller's python dict:
    // bit -> ((atomId, radius), ...)
    bitInfo.attr("clear")();
    for (RDKit::MorganFingerprints::BitInfoMap::const_iterator iter =
             bitInfoMap->begin();
         iter != bitInfoMap->end(); ++iter) {
      const std::vector<std::pair<boost::uint32_t, boost::uint32_t> > &v =
          iter->second;
      python::list localL;
      for (std::vector<std::pair<boost::uint32_t, boost::uint32_t> >::
               const_iterator vIt = v.begin();
           vIt != v.end(); ++vIt) {
        localL.append(python::make_tuple(vIt->first, vIt->second));
      }
      bitInfo[iter->first] = python::tuple(localL);
    }
  }
  return res;
}
}
// Python wrapper: unhashed (sparse) Morgan fingerprint.  A negative nBits
// value tells the shared helper to skip hashing entirely.
RDKit::SparseIntVect<boost::uint32_t> *GetMorganFingerprint(
    const RDKit::ROMol &mol, int radius, python::object invariants,
    python::object fromAtoms, bool useChirality, bool useBondTypes,
    bool useFeatures, bool useCounts, python::object bitInfo) {
  const int noHashing = -1;  // sentinel understood by MorganFingerprintHelper
  return MorganFingerprintHelper(mol, radius, noHashing, invariants, fromAtoms,
                                 useChirality, useBondTypes, useFeatures,
                                 useCounts, bitInfo);
}
// Python wrapper: Morgan fingerprint hashed into nBits bits.  Counts are
// always enabled for the hashed variant (useCounts is fixed to true).
RDKit::SparseIntVect<boost::uint32_t> *GetHashedMorganFingerprint(
    const RDKit::ROMol &mol, int radius, int nBits, python::object invariants,
    python::object fromAtoms, bool useChirality, bool useBondTypes,
    bool useFeatures, python::object bitInfo) {
  const bool useCounts = true;
  return MorganFingerprintHelper(mol, radius, nBits, invariants, fromAtoms,
                                 useChirality, useBondTypes, useFeatures,
                                 useCounts, bitInfo);
}
// Python wrapper: Morgan fingerprint folded into an ExplicitBitVect of nBits
// bits.  Optional arguments behave as in MorganFingerprintHelper: explicit
// per-atom `invariants` (length must equal the atom count), `fromAtoms`, and a
// `bitInfo` dict that is cleared and filled with bit -> ((atomId, radius), ...)
// entries.  Returns a newly allocated bit vector owned by the caller.
// FIX: invariant vector and bit-info map now live on the stack; the previous
// heap allocations leaked when a python::extract or the fingerprinter threw.
ExplicitBitVect *GetMorganFingerprintBV(
    const RDKit::ROMol &mol, int radius, unsigned int nBits,
    python::object invariants, python::object fromAtoms, bool useChirality,
    bool useBondTypes, bool useFeatures, python::object bitInfo) {
  std::vector<boost::uint32_t> invarStorage;
  std::vector<boost::uint32_t> *invars = 0;
  if (invariants) {
    unsigned int nInvar =
        python::extract<unsigned int>(invariants.attr("__len__")());
    if (nInvar) {
      if (nInvar != mol.getNumAtoms()) {
        throw_value_error("length of invariant vector != number of atoms");
      }
      invarStorage.resize(mol.getNumAtoms());
      for (unsigned int i = 0; i < mol.getNumAtoms(); ++i) {
        invarStorage[i] = python::extract<boost::uint32_t>(invariants[i]);
      }
      invars = &invarStorage;
    }
  } else if (useFeatures) {
    // no explicit invariants: fall back to FCFP-style feature invariants
    invarStorage.resize(mol.getNumAtoms());
    RDKit::MorganFingerprints::getFeatureInvariants(mol, invarStorage);
    invars = &invarStorage;
  }
  rdk_auto_ptr<std::vector<boost::uint32_t> > froms =
      pythonObjectToVect(fromAtoms, mol.getNumAtoms());
  RDKit::MorganFingerprints::BitInfoMap bitInfoStorage;
  RDKit::MorganFingerprints::BitInfoMap *bitInfoMap = 0;
  if (bitInfo != python::object()) {
    // make sure the optional argument actually was a dictionary
    python::dict typecheck = python::extract<python::dict>(bitInfo);
    bitInfoMap = &bitInfoStorage;
  }
  ExplicitBitVect *res = RDKit::MorganFingerprints::getFingerprintAsBitVect(
      mol, static_cast<unsigned int>(radius), nBits, invars, froms.get(),
      useChirality, useBondTypes, false, bitInfoMap);
  if (bitInfoMap) {
    // mirror the C++ bit-info map into the caller's python dict
    bitInfo.attr("clear")();
    for (RDKit::MorganFingerprints::BitInfoMap::const_iterator iter =
             bitInfoMap->begin();
         iter != bitInfoMap->end(); ++iter) {
      const std::vector<std::pair<boost::uint32_t, boost::uint32_t> > &v =
          iter->second;
      python::list localL;
      for (std::vector<std::pair<boost::uint32_t, boost::uint32_t> >::
               const_iterator vIt = v.begin();
           vIt != v.end(); ++vIt) {
        localL.append(python::make_tuple(vIt->first, vIt->second));
      }
      bitInfo[iter->first] = python::tuple(localL);
    }
  }
  return res;
}
// Computes the ECFP-style connectivity invariant for every atom and returns
// them as a python list of longs, one entry per atom.
python::list GetConnectivityInvariants(const RDKit::ROMol &mol,
                                       bool includeRingMembership) {
  const unsigned int nAtoms = mol.getNumAtoms();
  std::vector<boost::uint32_t> invariants(nAtoms);
  RDKit::MorganFingerprints::getConnectivityInvariants(mol, invariants,
                                                       includeRingMembership);
  python::list pyInvars;
  for (unsigned int i = 0; i < nAtoms; ++i) {
    pyInvars.append(python::long_(invariants[i]));
  }
  return pyInvars;
}
// Computes the FCFP-style feature invariant for every atom and returns them
// as a python list of longs, one entry per atom.
python::list GetFeatureInvariants(const RDKit::ROMol &mol) {
  const unsigned int nAtoms = mol.getNumAtoms();
  std::vector<boost::uint32_t> invariants(nAtoms);
  RDKit::MorganFingerprints::getFeatureInvariants(mol, invariants);
  python::list pyInvars;
  for (unsigned int i = 0; i < nAtoms; ++i) {
    pyInvars.append(python::long_(invariants[i]));
  }
  return pyInvars;
}
// Computes the 12-element USR shape descriptor for one conformer of the
// molecule.  Raises a ValueError when the molecule has no conformers or
// fewer than three atoms.
python::list GetUSR(const RDKit::ROMol &mol, int confId) {
  if (mol.getNumConformers() == 0) {
    throw_value_error("no conformers");
  }
  if (mol.getNumAtoms() < 3) {
    throw_value_error("too few atoms (minimum three)");
  }
  std::vector<double> usr(12);
  RDKit::Descriptors::USR(mol, usr, confId);
  python::list result;
  for (std::vector<double>::const_iterator it = usr.begin(); it != usr.end();
       ++it) {
    result.append(*it);
  }
  return result;
}
// Computes the four USR distance distributions for a set of coordinates and
// returns them as a list of four lists of doubles.  If `points` is a python
// list, the four USR reference points are appended to it.
// FIX: the coordinates are now owned by a stack vector of values; the old
// code new'd each Point3D and leaked them all if an extract or the
// calculator threw before the cleanup loop ran.
python::list GetUSRDistributions(python::object coords, python::object points) {
  unsigned int numCoords =
      python::extract<unsigned int>(coords.attr("__len__")());
  if (numCoords == 0) {
    throw_value_error("no coordinates");
  }
  // value storage never resizes after construction, so the pointers placed
  // in c remain valid for the whole call
  std::vector<RDGeom::Point3D> coordStorage(numCoords);
  RDGeom::Point3DConstPtrVect c(numCoords);
  for (unsigned int i = 0; i < numCoords; ++i) {
    coordStorage[i] = python::extract<RDGeom::Point3D>(coords[i]);
    c[i] = &coordStorage[i];
  }
  std::vector<RDGeom::Point3D> pts(4);
  std::vector<std::vector<double> > distances(4);
  RDKit::Descriptors::calcUSRDistributions(c, distances, pts);
  if (points != python::object()) {
    // make sure the optional argument actually was a list
    python::list tmpPts = python::extract<python::list>(points);
    BOOST_FOREACH (RDGeom::Point3D p, pts) { tmpPts.append(p); }
    points = tmpPts;
  }
  python::list pyDist;
  BOOST_FOREACH (std::vector<double> dist, distances) {
    python::list pytmp;
    BOOST_FOREACH (double d, dist) { pytmp.append(d); }
    pyDist.append(pytmp);
  }
  return pyDist;
}
// Computes USR distance distributions for a set of coordinates relative to a
// caller-supplied set of reference points; returns one list of doubles per
// reference point.  Raises a ValueError for empty coordinate or point lists.
// FIX: coordinates are owned by a stack vector of values; the old code new'd
// each Point3D and leaked them when an extract threw, when the calculator
// threw, or on the early "no points" error path.  The emptiness checks are
// now done up front, before any conversion work.
python::list GetUSRDistributionsFromPoints(python::object coords,
                                           python::object points) {
  unsigned int numCoords =
      python::extract<unsigned int>(coords.attr("__len__")());
  unsigned int numPts = python::extract<unsigned int>(points.attr("__len__")());
  if (numCoords == 0) {
    throw_value_error("no coordinates");
  }
  if (numPts == 0) {
    throw_value_error("no points");
  }
  // value storage never resizes after construction, so the pointers placed
  // in c remain valid for the whole call
  std::vector<RDGeom::Point3D> coordStorage(numCoords);
  RDGeom::Point3DConstPtrVect c(numCoords);
  for (unsigned int i = 0; i < numCoords; ++i) {
    coordStorage[i] = python::extract<RDGeom::Point3D>(coords[i]);
    c[i] = &coordStorage[i];
  }
  std::vector<RDGeom::Point3D> p(numPts);
  for (unsigned int i = 0; i < numPts; ++i) {
    p[i] = python::extract<RDGeom::Point3D>(points[i]);
  }
  std::vector<std::vector<double> > distances(numPts);
  RDKit::Descriptors::calcUSRDistributionsFromPoints(c, p, distances);
  python::list pyDist;
  BOOST_FOREACH (std::vector<double> dist, distances) {
    python::list pytmp;
    BOOST_FOREACH (double d, dist) { pytmp.append(d); }
    pyDist.append(pytmp);
  }
  return pyDist;
}
// Builds the 12-element USR descriptor from a set of previously computed
// distance distributions (a sequence of non-empty sequences of doubles).
// Raises a ValueError when the outer list or any inner list is empty.
python::list GetUSRFromDistributions(python::object distances) {
  const unsigned int nDist =
      python::extract<unsigned int>(distances.attr("__len__")());
  if (nDist == 0) {
    throw_value_error("no distances");
  }
  std::vector<std::vector<double> > dists(nDist);
  for (unsigned int i = 0; i < nDist; ++i) {
    const unsigned int nVals =
        python::extract<unsigned int>(distances[i].attr("__len__")());
    if (nVals == 0) {
      throw_value_error("distances missing");
    }
    dists[i].resize(nVals);
    for (unsigned int j = 0; j < nVals; ++j) {
      dists[i][j] = python::extract<double>(distances[i][j]);
    }
  }
  std::vector<double> descriptor(12);
  RDKit::Descriptors::calcUSRFromDistributions(dists, descriptor);
  python::list result;
  for (std::vector<double>::const_iterator it = descriptor.begin();
       it != descriptor.end(); ++it) {
    result.append(*it);
  }
  return result;
}
// Scores the similarity of two USR/USRCAT descriptors.  Both descriptors
// must have the same length; one weight is used per 12-element sub-descriptor
// and all weights default to 1.0 when the weights list is empty.  Raises a
// ValueError on mismatched descriptor lengths or a wrong weight count.
double GetUSRScore(python::object descriptor1, python::object descriptor2,
                   python::object weights) {
  const unsigned int nElems =
      python::extract<unsigned int>(descriptor1.attr("__len__")());
  const unsigned int nElems2 =
      python::extract<unsigned int>(descriptor2.attr("__len__")());
  if (nElems != nElems2) {
    throw_value_error("descriptors must have the same length");
  }
  const unsigned int nWeights = nElems / 12;  // one weight per 12 values
  const unsigned int nPyWeights =
      python::extract<unsigned int>(weights.attr("__len__")());
  std::vector<double> wts(nWeights, 1.0);  // default weights: all to 1.0
  if (nPyWeights > 0 && nPyWeights != nWeights) {
    throw_value_error("number of weights is not correct");
  }
  if (nPyWeights == nWeights) {
    for (unsigned int i = 0; i < nWeights; ++i) {
      wts[i] = python::extract<double>(weights[i]);
    }
  }
  std::vector<double> d1(nElems);
  std::vector<double> d2(nElems);
  for (unsigned int i = 0; i < nElems; ++i) {
    d1[i] = python::extract<double>(descriptor1[i]);
    d2[i] = python::extract<double>(descriptor2[i]);
  }
  return RDKit::Descriptors::calcUSRScore(d1, d2, wts);
}
// Computes the USRCAT descriptor for one conformer of the molecule.
// `atomSelections`, when provided, is a list of atom-index lists (1-based on
// the python side; converted to 0-based here); each selection extends the
// descriptor by twelve values.  Raises a ValueError when the molecule has no
// conformers, has fewer than three atoms, or the selection list is empty.
python::list GetUSRCAT(const RDKit::ROMol &mol, python::object atomSelections,
                       int confId) {
  if (mol.getNumConformers() == 0) {
    throw_value_error("no conformers");
  }
  if (mol.getNumAtoms() < 3) {
    throw_value_error("too few atoms (minimum three)");
  }
  std::vector<std::vector<unsigned int> > selections;
  unsigned int descriptorSize = 60;  // size without custom selections
  if (atomSelections != python::object()) {
    // make sure the optional argument actually was a list
    python::list typecheck = python::extract<python::list>(atomSelections);
    const unsigned int nSelections =
        python::extract<unsigned int>(atomSelections.attr("__len__")());
    if (nSelections == 0) {
      throw_value_error("empty atom selections");
    }
    selections.resize(nSelections);
    for (unsigned int i = 0; i < nSelections; ++i) {
      const unsigned int nIds =
          python::extract<unsigned int>(atomSelections[i].attr("__len__")());
      selections[i].resize(nIds);
      for (unsigned int j = 0; j < nIds; ++j) {
        // convert the caller's 1-based atom indices to 0-based
        selections[i][j] =
            python::extract<unsigned int>(atomSelections[i][j]) - 1;
      }
    }
    descriptorSize = 12 * (nSelections + 1);
  }
  std::vector<double> descriptor(descriptorSize);
  RDKit::Descriptors::USRCAT(mol, descriptor, selections, confId);
  python::list result;
  for (std::vector<double>::const_iterator it = descriptor.begin();
       it != descriptor.end(); ++it) {
    result.append(*it);
  }
  return result;
}
// Python wrapper for calcSlogP_VSA.  `bins`, when non-empty, supplies custom
// bin boundaries; otherwise the descriptor's defaults are used.  Returns the
// per-bin values as a python list.
// FIX: the bin vector now lives on the stack; the previous heap allocation
// (`new std::vector<double>`) was never deleted, leaking on every call that
// passed custom bins.
python::list CalcSlogPVSA(const RDKit::ROMol &mol, python::object bins,
                          bool force) {
  std::vector<double> binStorage;
  std::vector<double> *lbins = 0;
  if (bins) {
    unsigned int nBins = python::extract<unsigned int>(bins.attr("__len__")());
    if (nBins) {
      binStorage.resize(nBins, 0.0);
      for (unsigned int i = 0; i < nBins; ++i) {
        binStorage[i] = python::extract<double>(bins[i]);
      }
      lbins = &binStorage;
    }
  }
  std::vector<double> res =
      RDKit::Descriptors::calcSlogP_VSA(mol, lbins, force);
  python::list pyres;
  BOOST_FOREACH (double dv, res) { pyres.append(dv); }
  return pyres;
}
// Python wrapper for calcSMR_VSA.  `bins`, when non-empty, supplies custom
// bin boundaries; otherwise the descriptor's defaults are used.  Returns the
// per-bin values as a python list.
// FIX: the bin vector now lives on the stack; the previous heap allocation
// (`new std::vector<double>`) was never deleted, leaking on every call that
// passed custom bins.
python::list CalcSMRVSA(const RDKit::ROMol &mol, python::object bins,
                        bool force) {
  std::vector<double> binStorage;
  std::vector<double> *lbins = 0;
  if (bins) {
    unsigned int nBins = python::extract<unsigned int>(bins.attr("__len__")());
    if (nBins) {
      binStorage.resize(nBins, 0.0);
      for (unsigned int i = 0; i < nBins; ++i) {
        binStorage[i] = python::extract<double>(bins[i]);
      }
      lbins = &binStorage;
    }
  }
  std::vector<double> res = RDKit::Descriptors::calcSMR_VSA(mol, lbins, force);
  python::list pyres;
  BOOST_FOREACH (double dv, res) { pyres.append(dv); }
  return pyres;
}
// Python wrapper for calcPEOE_VSA.  `bins`, when non-empty, supplies custom
// bin boundaries; otherwise the descriptor's defaults are used.  Returns the
// per-bin values as a python list.
// FIX: the bin vector now lives on the stack; the previous heap allocation
// (`new std::vector<double>`) was never deleted, leaking on every call that
// passed custom bins.
python::list CalcPEOEVSA(const RDKit::ROMol &mol, python::object bins,
                         bool force) {
  std::vector<double> binStorage;
  std::vector<double> *lbins = 0;
  if (bins) {
    unsigned int nBins = python::extract<unsigned int>(bins.attr("__len__")());
    if (nBins) {
      binStorage.resize(nBins, 0.0);
      for (unsigned int i = 0; i < nBins; ++i) {
        binStorage[i] = python::extract<double>(bins[i]);
      }
      lbins = &binStorage;
    }
  }
  std::vector<double> res = RDKit::Descriptors::calcPEOE_VSA(mol, lbins, force);
  python::list pyres;
  BOOST_FOREACH (double dv, res) { pyres.append(dv); }
  return pyres;
}
// Python wrapper for calcMQNs: returns the molecular quantum number counts
// as a python list of unsigned ints.
python::list CalcMQNs(const RDKit::ROMol &mol, bool force) {
  const std::vector<unsigned int> mqns =
      RDKit::Descriptors::calcMQNs(mol, force);
  python::list result;
  for (std::vector<unsigned int>::const_iterator it = mqns.begin();
       it != mqns.end(); ++it) {
    result.append(*it);
  }
  return result;
}
// Returns the number of spiro atoms in the molecule.  When the caller passes
// a python list as `pyatoms`, the indices of those atoms are appended to it.
unsigned int numSpiroAtoms(const RDKit::ROMol &mol, python::object pyatoms) {
  const bool wantAtomIds = pyatoms != python::object();
  std::vector<unsigned int> atomIds;
  const unsigned int count =
      RDKit::Descriptors::calcNumSpiroAtoms(mol, wantAtomIds ? &atomIds : NULL);
  if (wantAtomIds) {
    python::list outList = python::extract<python::list>(pyatoms);
    for (std::vector<unsigned int>::const_iterator it = atomIds.begin();
         it != atomIds.end(); ++it) {
      outList.append(*it);
    }
  }
  return count;
}
// Returns the number of bridgehead atoms in the molecule.  When the caller
// passes a python list as `pyatoms`, the indices of those atoms are appended
// to it.
unsigned int numBridgeheadAtoms(const RDKit::ROMol &mol,
                                python::object pyatoms) {
  const bool wantAtomIds = pyatoms != python::object();
  std::vector<unsigned int> atomIds;
  const unsigned int count = RDKit::Descriptors::calcNumBridgeheadAtoms(
      mol, wantAtomIds ? &atomIds : NULL);
  if (wantAtomIds) {
    python::list outList = python::extract<python::list>(pyatoms);
    for (std::vector<unsigned int>::const_iterator it = atomIds.begin();
         it != atomIds.end(); ++it) {
      outList.append(*it);
    }
  }
  return count;
}
/// Based on this excellent StackOverflow answer:
/// http://stackoverflow.com/questions/15842126/feeding-a-python-list-into-a-function-taking-in-a-vector-with-boost-python
/// (which also explains a lot about how boost.python's converter registry works).
/// @brief Type that allows for registration of conversions from
/// python iterable types to C++ container types.  Registration is
/// performed once per container type via from_python<Container>().
struct iterable_converter {
  /// @note Registers a converter from a python iterable type to the
  /// provided container type.
  template <typename Container>
  iterable_converter &from_python() {
    boost::python::converter::registry::push_back(
        &iterable_converter::convertible,
        &iterable_converter::construct<Container>,
        boost::python::type_id<Container>());
    // Support chaining.
    return *this;
  }
  /// @brief Check if PyObject is iterable.
  /// Returns the object itself when iterable (signaling boost::python that
  /// conversion can proceed), NULL otherwise.
  /// NOTE(review): the iterator returned by PyObject_GetIter on success is
  /// never decref'd here — possible reference leak; confirm.
  static void *convertible(PyObject *object) {
    return PyObject_GetIter(object) ? object : NULL;
  }
  /// @brief Convert iterable PyObject to C++ container type.
  ///
  /// Container Concept requirements:
  ///
  ///   * Container::value_type is CopyConstructable.
  ///   * Container can be constructed and populated with two iterators.
  ///     I.e. Container(begin, end)
  template <typename Container>
  static void construct(
      PyObject *object,
      boost::python::converter::rvalue_from_python_stage1_data *data) {
    namespace python = boost::python;
    // Object is a borrowed reference, so create a handle indicting it is
    // borrowed for proper reference counting.
    python::handle<> handle(python::borrowed(object));
    // Obtain a handle to the memory block that the converter has allocated
    // for the C++ type.
    typedef python::converter::rvalue_from_python_storage<Container>
        storage_type;
    void *storage = reinterpret_cast<storage_type *>(data)->storage.bytes;
    typedef python::stl_input_iterator<typename Container::value_type> iterator;
    // Allocate the C++ type into the converter's memory block, and assign
    // its handle to the converter's convertible variable. The C++
    // container is populated by passing the begin and end iterators of
    // the python object to the container's constructor.
    new (storage) Container(iterator(python::object(handle)),  // begin
                            iterator());                       // end
    data->convertible = storage;
  }
};
// Adapter that lets a python object implementing __call__(mol) -> float act
// as an RDKit PropertyFunctor.  Holds a python reference to the wrapped
// object for the lifetime of the functor.
struct PythonPropertyFunctor : public RDKit::Descriptors::PropertyFunctor {
  // the wrapped python callable; incref'd in the ctor, decref'd in the dtor
  PyObject *self;
  // n.b. until we switch the query d_dataFunc over to boost::function
  // we can't use python props in functions.
  PythonPropertyFunctor(PyObject *self, const std::string &name,
                        const std::string &version)
      : PropertyFunctor(name, version), self(self) {
    // keep the python object alive while this functor exists
    python::incref(self);
  }
  // NOTE(review): the implicitly-generated copy constructor copies `self`
  // without incref'ing it, so copying an instance would lead to a double
  // decref — confirm instances are never copied.
  ~PythonPropertyFunctor() { python::decref(self); }
  // Invoke the wrapped python callable with the molecule and return its
  // result as a double.
  double operator()(const RDKit::ROMol &mol) const {
    return python::call_method<double>(self, "__call__", boost::ref(mol));
  }
};
}
BOOST_PYTHON_MODULE(rdMolDescriptors) {
python::scope().attr("__doc__") =
"Module containing functions to compute molecular descriptors";
std::string docString = "";
python::class_<python::object>("AtomPairsParameters")
.setattr("version", RDKit::AtomPairs::atomPairsVersion)
.setattr("numTypeBits", RDKit::AtomPairs::numTypeBits)
.setattr("numPiBits", RDKit::AtomPairs::numPiBits)
.setattr("numBranchBits", RDKit::AtomPairs::numBranchBits)
.setattr("numChiralBits", RDKit::AtomPairs::numChiralBits)
.setattr("codeSize", RDKit::AtomPairs::codeSize)
.setattr("atomTypes", atomPairTypes)
.setattr("numPathBits", RDKit::AtomPairs::numPathBits)
.setattr("numAtomPairFingerprintBits",
RDKit::AtomPairs::numAtomPairFingerprintBits);
docString = "Returns the atom code (hash) for an atom";
python::def("GetAtomPairAtomCode", RDKit::AtomPairs::getAtomCode,
(python::arg("atom"), python::arg("branchSubtract") = 0,
python::arg("includeChirality") = false),
docString.c_str());
docString =
"Returns the atom-pair code (hash) for a pair of atoms separated by a "
"certain number of bonds";
python::def(
"GetAtomPairCode", RDKit::AtomPairs::getAtomPairCode,
(python::arg("atom1Code"), python::arg("atom2Code"),
python::arg("distance"), python::arg("includeChirality") = false),
docString.c_str());
docString =
"Returns the atom-pair fingerprint for a molecule as an IntSparseIntVect";
python::def("GetAtomPairFingerprint", GetAtomPairFingerprint,
(python::arg("mol"), python::arg("minLength") = 1,
python::arg("maxLength") = RDKit::AtomPairs::maxPathLen - 1,
python::arg("fromAtoms") = 0, python::arg("ignoreAtoms") = 0,
python::arg("atomInvariants") = 0,
python::arg("includeChirality") = false,
python::arg("use2D") = true, python::arg("confId") = -1),
docString.c_str(),
python::return_value_policy<python::manage_new_object>());
docString =
"Returns the hashed atom-pair fingerprint for a molecule as an "
"IntSparseIntVect";
python::def("GetHashedAtomPairFingerprint", GetHashedAtomPairFingerprint,
(python::arg("mol"), python::arg("nBits") = 2048,
python::arg("minLength") = 1,
python::arg("maxLength") = RDKit::AtomPairs::maxPathLen - 1,
python::arg("fromAtoms") = 0, python::arg("ignoreAtoms") = 0,
python::arg("atomInvariants") = 0,
python::arg("includeChirality") = false,
python::arg("use2D") = true, python::arg("confId") = -1),
docString.c_str(),
python::return_value_policy<python::manage_new_object>());
docString =
"Returns the atom-pair fingerprint for a molecule as an ExplicitBitVect";
python::def(
"GetHashedAtomPairFingerprintAsBitVect",
GetHashedAtomPairFingerprintAsBitVect,
(python::arg("mol"), python::arg("nBits") = 2048,
python::arg("minLength") = 1,
python::arg("maxLength") = RDKit::AtomPairs::maxPathLen - 1,
python::arg("fromAtoms") = 0, python::arg("ignoreAtoms") = 0,
python::arg("atomInvariants") = 0, python::arg("nBitsPerEntry") = 4,
python::arg("includeChirality") = false, python::arg("use2D") = true,
python::arg("confId") = -1),
docString.c_str(),
python::return_value_policy<python::manage_new_object>());
docString =
"Returns the topological-torsion fingerprint for a molecule as a "
"LongIntSparseIntVect";
python::def("GetTopologicalTorsionFingerprint",
GetTopologicalTorsionFingerprint,
(python::arg("mol"), python::arg("targetSize") = 4,
python::arg("fromAtoms") = 0, python::arg("ignoreAtoms") = 0,
python::arg("atomInvariants") = 0,
python::arg("includeChirality") = false),
docString.c_str(),
python::return_value_policy<python::manage_new_object>());
docString =
"Returns the hashed topological-torsion fingerprint for a molecule as a "
"LongIntSparseIntVect";
python::def(
"GetHashedTopologicalTorsionFingerprint",
GetHashedTopologicalTorsionFingerprint,
(python::arg("mol"), python::arg("nBits") = 2048,
python::arg("targetSize") = 4, python::arg("fromAtoms") = 0,
python::arg("ignoreAtoms") = 0, python::arg("atomInvariants") = 0,
python::arg("includeChirality") = false),
docString.c_str(),
python::return_value_policy<python::manage_new_object>());
docString =
"Returns the topological-torsion fingerprint for a molecule as an "
"ExplicitBitVect";
python::def(
"GetHashedTopologicalTorsionFingerprintAsBitVect",
GetHashedTopologicalTorsionFingerprintAsBitVect,
(python::arg("mol"), python::arg("nBits") = 2048,
python::arg("targetSize") = 4, python::arg("fromAtoms") = 0,
python::arg("ignoreAtoms") = 0, python::arg("atomInvariants") = 0,
python::arg("nBitsPerEntry") = 4,
python::arg("includeChirality") = false),
docString.c_str(),
python::return_value_policy<python::manage_new_object>());
docString = "Returns a Morgan fingerprint for a molecule";
python::def(
"GetMorganFingerprint", GetMorganFingerprint,
(python::arg("mol"), python::arg("radius"),
python::arg("invariants") = python::list(),
python::arg("fromAtoms") = python::list(),
python::arg("useChirality") = false, python::arg("useBondTypes") = true,
python::arg("useFeatures") = false, python::arg("useCounts") = true,
python::arg("bitInfo") = python::object()),
docString.c_str(),
python::return_value_policy<python::manage_new_object>());
docString = "Returns a hashed Morgan fingerprint for a molecule";
python::def(
"GetHashedMorganFingerprint", GetHashedMorganFingerprint,
(python::arg("mol"), python::arg("radius"), python::arg("nBits") = 2048,
python::arg("invariants") = python::list(),
python::arg("fromAtoms") = python::list(),
python::arg("useChirality") = false, python::arg("useBondTypes") = true,
python::arg("useFeatures") = false,
python::arg("bitInfo") = python::object()),
docString.c_str(),
python::return_value_policy<python::manage_new_object>());
docString = "Returns a Morgan fingerprint for a molecule as a bit vector";
python::def(
"GetMorganFingerprintAsBitVect", GetMorganFingerprintBV,
(python::arg("mol"), python::arg("radius"), python::arg("nBits") = 2048,
python::arg("invariants") = python::list(),
python::arg("fromAtoms") = python::list(),
python::arg("useChirality") = false, python::arg("useBondTypes") = true,
python::arg("useFeatures") = false,
python::arg("bitInfo") = python::object()),
docString.c_str(),
python::return_value_policy<python::manage_new_object>());
python::scope().attr("_MorganFingerprint_version") =
RDKit::MorganFingerprints::morganFingerprintVersion;
docString = "Returns connectivity invariants (ECFP-like) for a molecule.";
python::def("GetConnectivityInvariants", GetConnectivityInvariants,
(python::arg("mol"), python::arg("includeRingMembership") = true),
docString.c_str());
python::scope().attr("_ConnectivityInvariants_version") =
RDKit::MorganFingerprints::morganConnectivityInvariantVersion;
docString = "Returns feature invariants (FCFP-like) for a molecule.";
python::def("GetFeatureInvariants", GetFeatureInvariants,
(python::arg("mol")), docString.c_str());
python::scope().attr("_FeatureInvariants_version") =
RDKit::MorganFingerprints::morganFeatureInvariantVersion;
// USR descriptor
docString = "Returns a USR descriptor for one conformer of a molecule";
python::def("GetUSR", GetUSR,
(python::arg("mol"), python::arg("confId") = -1),
docString.c_str());
docString =
"Returns the four USR distance distributions for a set of coordinates";
python::def("GetUSRDistributions", GetUSRDistributions,
(python::arg("coords"), python::arg("points") = python::object()),
docString.c_str());
docString =
"Returns the USR distance distributions for a set of coordinates and "
"points";
python::def("GetUSRDistributionsFromPoints", GetUSRDistributionsFromPoints,
(python::arg("coords"), python::arg("points")),
docString.c_str());
docString = "Returns the USR descriptor from a set of distance distributions";
python::def("GetUSRFromDistributions", GetUSRFromDistributions,
(python::arg("distances")), docString.c_str());
docString = "Returns the USR score for two USR or USRCAT descriptors";
python::def("GetUSRScore", GetUSRScore,
(python::arg("descriptor1"), python::arg("descriptor2"),
python::arg("weights") = python::list()),
docString.c_str());
docString = "Returns a USRCAT descriptor for one conformer of a molecule";
python::def(
"GetUSRCAT", GetUSRCAT,
(python::arg("mol"), python::arg("atomSelections") = python::object(),
python::arg("confId") = -1),
docString.c_str());
docString =
"returns (as a list of 2-tuples) the contributions of each atom to\n"
"the Wildman-Cripppen logp and mr value";
python::def("_CalcCrippenContribs", computeCrippenContribs,
(python::arg("mol"), python::arg("force") = false,
python::arg("atomTypes") = python::list(),
python::arg("atomTypeLabels") = python::list()),
docString.c_str());
docString = "returns a 2-tuple with the Wildman-Crippen logp,mr values";
python::def("CalcCrippenDescriptors", calcCrippenDescriptors,
(python::arg("mol"), python::arg("includeHs") = true,
python::arg("force") = false),
docString.c_str());
python::scope().attr("_CalcCrippenDescriptors_version") =
RDKit::Descriptors::crippenVersion;
docString = "returns the Labute ASA value for a molecule";
python::def("CalcLabuteASA", RDKit::Descriptors::calcLabuteASA,
(python::arg("mol"), python::arg("includeHs") = true,
python::arg("force") = false),
docString.c_str());
python::scope().attr("_CalcLabuteASA_version") =
RDKit::Descriptors::labuteASAVersion;
docString = "returns a list of atomic contributions to the Labute ASA";
python::def("_CalcLabuteASAContribs", computeASAContribs,
(python::arg("mol"), python::arg("includeHs") = true,
python::arg("force") = false),
docString.c_str());
docString = "returns the TPSA value for a molecule";
python::def("CalcTPSA", RDKit::Descriptors::calcTPSA,
(python::arg("mol"), python::arg("force") = false),
docString.c_str());
python::scope().attr("_CalcTPSA_version") = RDKit::Descriptors::tpsaVersion;
docString = "returns a list of atomic contributions to the TPSA";
python::def("_CalcTPSAContribs", computeTPSAContribs,
(python::arg("mol"), python::arg("force") = false),
docString.c_str());
docString = "returns the molecule's molecular weight";
python::def("_CalcMolWt", RDKit::Descriptors::calcAMW,
(python::arg("mol"), python::arg("onlyHeavy") = false),
docString.c_str());
python::scope().attr("_CalcMolWt_version") = "1.0.0";
docString = "returns the molecule's exact molecular weight";
python::def("CalcExactMolWt", RDKit::Descriptors::calcExactMW,
(python::arg("mol"), python::arg("onlyHeavy") = false),
docString.c_str());
python::scope().attr("_CalcExactMolWt_version") = "1.0.0";
docString = "returns the molecule's formula";
python::def("CalcMolFormula", RDKit::Descriptors::calcMolFormula,
(python::arg("mol"), python::arg("separateIsotopes") = false,
python::arg("abbreviateHIsotopes") = true),
docString.c_str());
python::scope().attr("_CalcMolFormula_version") = "1.3.0";
docString = "returns the number of Lipinski H-bond donors for a molecule";
python::def("CalcNumLipinskiHBD", RDKit::Descriptors::calcLipinskiHBD,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumLipinskiHBD_version") =
RDKit::Descriptors::lipinskiHBDVersion;
docString = "returns the number of Lipinski H-bond acceptors for a molecule";
python::def("CalcNumLipinskiHBA", RDKit::Descriptors::calcLipinskiHBA,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumLipinskiHBA_version") =
RDKit::Descriptors::lipinskiHBAVersion;
docString = "returns the number of H-bond donors for a molecule";
python::def("CalcNumHBD", RDKit::Descriptors::calcNumHBD,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumHBD_version") =
RDKit::Descriptors::NumHBDVersion;
docString = "returns the number of H-bond acceptors for a molecule";
python::def("CalcNumHBA", RDKit::Descriptors::calcNumHBA,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumHBA_version") =
RDKit::Descriptors::NumHBAVersion;
// exposes calcNumRotatableBondOptions (must be a better way!)
docString =
"Options for generating rotatble bonds\n\
NonStrict - standard loose definitions\n\
Strict - stricter definition excluding amides, esters, etc\n\
StrictLinkages - adds rotors between rotatable bonds\n\
Default - Current RDKit default\n";
python::enum_<RDKit::Descriptors::NumRotatableBondsOptions>(
"NumRotatableBondsOptions", docString.c_str())
.value("NonStrict", RDKit::Descriptors::NonStrict)
.value("Strict", RDKit::Descriptors::Strict)
.value("StrictLinkages", RDKit::Descriptors::StrictLinkages)
.value("Default", RDKit::Descriptors::Default);
// The default value of "strict" depends on how the toolkit was compiled
// (RDK_USE_STRICT_ROTOR_DEFINITION), so the docstring is selected to match:
// with the flag, Strict is the default; without it, NonStrict is.
#ifdef RDK_USE_STRICT_ROTOR_DEFINITION
docString =
    "returns the number of rotatable bonds for a molecule.\n\
   strict = NumRotatableBondsOptions.NonStrict - Simple rotatable bond definition.\n\
   strict = NumRotatableBondsOptions.Strict - (default) does not count things like\n\
    amide or ester bonds\n\
   strict = NumRotatableBondsOptions.StrictLinkages - handles linkages between ring\n\
    systems.\n\
     - Single bonds between aliphatic ring Cs are always rotatable. This\n\
       means that the central bond in CC1CCCC(C)C1-C1C(C)CCCC1C is now \n\
       considered rotatable; it was not before\n\
     - Heteroatoms in the linked rings no longer affect whether or not\n\
       the linking bond is rotatable\n\
     - the linking bond in systems like Cc1cccc(C)c1-c1c(C)cccc1 is now\n\
       considered non-rotatable";
#else
docString =
    "returns the number of rotatable bonds for a molecule.\n\
   strict = NumRotatableBondsOptions.NonStrict - (default) Simple rotatable bond definition.\n\
   strict = NumRotatableBondsOptions.Strict - does not count things like\n\
    amide or ester bonds\n\
   strict = NumRotatableBondsOptions.StrictLinkages - handles linkages between ring\n\
    systems.\n\
     - Single bonds between aliphatic ring Cs are always rotatable. This\n\
       means that the central bond in CC1CCCC(C)C1-C1C(C)CCCC1C is now \n\
       considered rotatable; it was not before\n\
     - Heteroatoms in the linked rings no longer affect whether or not\n\
       the linking bond is rotatable\n\
     - the linking bond in systems like Cc1cccc(C)c1-c1c(C)cccc1 is now\n\
       considered non-rotatable";
#endif
// Two overloads share the Python name CalcNumRotatableBonds: one taking a
// bool "strict", one taking a NumRotatableBondsOptions value (defaulted).
python::def("CalcNumRotatableBonds",
            (unsigned int (*)(const RDKit::ROMol &,
                              bool))RDKit::Descriptors::calcNumRotatableBonds,
            (python::arg("mol"), python::arg("strict")), docString.c_str());
python::def(
    "CalcNumRotatableBonds",
    (unsigned int (*)(const RDKit::ROMol &,
                      RDKit::Descriptors::NumRotatableBondsOptions))
        RDKit::Descriptors::calcNumRotatableBonds,
    (python::arg("mol"), python::arg("strict") = RDKit::Descriptors::Default),
    docString.c_str());
python::scope().attr("_CalcNumRotatableBonds_version") =
    RDKit::Descriptors::NumRotatableBondsVersion;
docString = "returns the number of rings for a molecule";
python::def("CalcNumRings", RDKit::Descriptors::calcNumRings,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumRings_version") =
RDKit::Descriptors::NumRingsVersion;
docString = "returns the number of aromatic rings for a molecule";
python::def("CalcNumAromaticRings", RDKit::Descriptors::calcNumAromaticRings,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumAromaticRings_version") =
RDKit::Descriptors::NumAromaticRingsVersion;
docString = "returns the number of saturated rings for a molecule";
python::def("CalcNumSaturatedRings",
RDKit::Descriptors::calcNumSaturatedRings, (python::arg("mol")),
docString.c_str());
python::scope().attr("_CalcNumSaturatedRings_version") =
RDKit::Descriptors::NumSaturatedRingsVersion;
docString = "returns the number of heterocycles for a molecule";
python::def("CalcNumHeterocycles", RDKit::Descriptors::calcNumHeterocycles,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumHeterocycles_version") =
RDKit::Descriptors::NumHeterocyclesVersion;
docString = "returns the number of aromatic heterocycles for a molecule";
python::def("CalcNumAromaticHeterocycles",
RDKit::Descriptors::calcNumAromaticHeterocycles,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumAromaticHeterocycles_version") =
RDKit::Descriptors::NumAromaticHeterocyclesVersion;
docString = "returns the number of aromatic carbocycles for a molecule";
python::def("CalcNumAromaticCarbocycles",
RDKit::Descriptors::calcNumAromaticCarbocycles,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumAromaticCarbocycles_version") =
RDKit::Descriptors::NumAromaticCarbocyclesVersion;
docString = "returns the number of saturated heterocycles for a molecule";
python::def("CalcNumSaturatedHeterocycles",
RDKit::Descriptors::calcNumSaturatedHeterocycles,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumSaturatedHeterocycles_version") =
RDKit::Descriptors::NumSaturatedHeterocyclesVersion;
docString = "returns the number of saturated carbocycles for a molecule";
python::def("CalcNumSaturatedCarbocycles",
RDKit::Descriptors::calcNumSaturatedCarbocycles,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumSaturatedCarbocycles_version") =
RDKit::Descriptors::NumSaturatedCarbocyclesVersion;
docString =
"returns the number of aliphatic (containing at least one non-aromatic "
"bond) rings for a molecule";
python::def("CalcNumAliphaticRings",
RDKit::Descriptors::calcNumAliphaticRings, (python::arg("mol")),
docString.c_str());
python::scope().attr("_CalcNumAliphaticRings_version") =
RDKit::Descriptors::NumAliphaticRingsVersion;
docString =
"returns the number of aliphatic (containing at least one non-aromatic "
"bond) heterocycles for a molecule";
python::def("CalcNumAliphaticHeterocycles",
RDKit::Descriptors::calcNumAliphaticHeterocycles,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumAliphaticHeterocycles_version") =
RDKit::Descriptors::NumAliphaticHeterocyclesVersion;
docString =
"returns the number of aliphatic (containing at least one non-aromatic "
"bond) carbocycles for a molecule";
python::def("CalcNumAliphaticCarbocycles",
RDKit::Descriptors::calcNumAliphaticCarbocycles,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumAliphaticCarbocycles_version") =
RDKit::Descriptors::NumAliphaticCarbocyclesVersion;
docString = "returns the number of heteroatoms for a molecule";
python::def("CalcNumHeteroatoms", RDKit::Descriptors::calcNumHeteroatoms,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumHeteroatoms_version") =
RDKit::Descriptors::NumHeteroatomsVersion;
docString = "returns the number of amide bonds in a molecule";
python::def("CalcNumAmideBonds", RDKit::Descriptors::calcNumAmideBonds,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcNumAmideBonds_version") =
RDKit::Descriptors::NumAmideBondsVersion;
docString = "returns the fraction of C atoms that are SP3 hybridized";
python::def("CalcFractionCSP3", RDKit::Descriptors::calcFractionCSP3,
(python::arg("mol")), docString.c_str());
python::scope().attr("_CalcFractionCSP3_version") =
RDKit::Descriptors::FractionCSP3Version;
docString = "returns the SlogP VSA contributions for a molecule";
python::def("SlogP_VSA_", CalcSlogPVSA,
(python::arg("mol"), python::arg("bins") = python::list(),
python::arg("force") = false));
docString = "returns the SMR VSA contributions for a molecule";
python::def("SMR_VSA_", CalcSMRVSA,
(python::arg("mol"), python::arg("bins") = python::list(),
python::arg("force") = false));
docString = "returns the PEOE VSA contributions for a molecule";
python::def("PEOE_VSA_", CalcPEOEVSA,
(python::arg("mol"), python::arg("bins") = python::list(),
python::arg("force") = false));
docString = "returns the MQN descriptors for a molecule";
python::def("MQNs_", CalcMQNs,
(python::arg("mol"), python::arg("force") = false));
// Chi connectivity indices: valence-electron based ChiNv/Chi0v..Chi4v and
// the nVal-based ChiNn/Chi0n..Chi4n variants, followed by the Hall-Kier
// alpha and kappa shape descriptors.
// FIX: docString was assigned before every def in this section but never
// passed to python::def (dead store, undocumented Python functions); it is
// now forwarded. Also fixed the duplicated word "uses uses" in the ChiXn
// docstrings.
docString =
    "From equations (5),(9) and (10) of Rev. Comp. Chem. vol 2, 367-422, "
    "(1991)";
python::def(
    "CalcChiNv", RDKit::Descriptors::calcChiNv,
    (python::arg("mol"), python::arg("n"), python::arg("force") = false),
    docString.c_str());
python::scope().attr("_CalcChiNv_version") = RDKit::Descriptors::chiNvVersion;
docString =
    "From equations (5),(9) and (10) of Rev. Comp. Chem. vol 2, 367-422, "
    "(1991)";
python::def("CalcChi0v", RDKit::Descriptors::calcChi0v,
            (python::arg("mol"), python::arg("force") = false),
            docString.c_str());
python::scope().attr("_CalcChi0v_version") = RDKit::Descriptors::chi0vVersion;
docString =
    "From equations (5),(9) and (10) of Rev. Comp. Chem. vol 2, 367-422, "
    "(1991)";
python::def("CalcChi1v", RDKit::Descriptors::calcChi1v,
            (python::arg("mol"), python::arg("force") = false),
            docString.c_str());
python::scope().attr("_CalcChi1v_version") = RDKit::Descriptors::chi1vVersion;
docString =
    "From equations (5),(9) and (10) of Rev. Comp. Chem. vol 2, 367-422, "
    "(1991)";
python::def("CalcChi2v", RDKit::Descriptors::calcChi2v,
            (python::arg("mol"), python::arg("force") = false),
            docString.c_str());
python::scope().attr("_CalcChi2v_version") = RDKit::Descriptors::chi2vVersion;
docString =
    "From equations (5),(9) and (10) of Rev. Comp. Chem. vol 2, 367-422, "
    "(1991)";
python::def("CalcChi3v", RDKit::Descriptors::calcChi3v,
            (python::arg("mol"), python::arg("force") = false),
            docString.c_str());
python::scope().attr("_CalcChi3v_version") = RDKit::Descriptors::chi3vVersion;
docString =
    "From equations (5),(9) and (10) of Rev. Comp. Chem. vol 2, 367-422, "
    "(1991)";
python::def("CalcChi4v", RDKit::Descriptors::calcChi4v,
            (python::arg("mol"), python::arg("force") = false),
            docString.c_str());
python::scope().attr("_CalcChi4v_version") = RDKit::Descriptors::chi4vVersion;
docString =
    "Similar to ChiXv, but uses nVal instead of valence. This makes a "
    "big difference after we get out of the first row.";
python::def(
    "CalcChiNn", RDKit::Descriptors::calcChiNn,
    (python::arg("mol"), python::arg("n"), python::arg("force") = false),
    docString.c_str());
python::scope().attr("_CalcChiNn_version") = RDKit::Descriptors::chiNnVersion;
docString =
    "Similar to ChiXv, but uses nVal instead of valence. This makes a "
    "big difference after we get out of the first row.";
python::def("CalcChi0n", RDKit::Descriptors::calcChi0n,
            (python::arg("mol"), python::arg("force") = false),
            docString.c_str());
python::scope().attr("_CalcChi0n_version") = RDKit::Descriptors::chi0nVersion;
docString =
    "Similar to ChiXv, but uses nVal instead of valence. This makes a "
    "big difference after we get out of the first row.";
python::def("CalcChi1n", RDKit::Descriptors::calcChi1n,
            (python::arg("mol"), python::arg("force") = false),
            docString.c_str());
python::scope().attr("_CalcChi1n_version") = RDKit::Descriptors::chi1nVersion;
docString =
    "Similar to ChiXv, but uses nVal instead of valence. This makes a "
    "big difference after we get out of the first row.";
python::def("CalcChi2n", RDKit::Descriptors::calcChi2n,
            (python::arg("mol"), python::arg("force") = false),
            docString.c_str());
python::scope().attr("_CalcChi2n_version") = RDKit::Descriptors::chi2nVersion;
docString =
    "Similar to ChiXv, but uses nVal instead of valence. This makes a "
    "big difference after we get out of the first row.";
python::def("CalcChi3n", RDKit::Descriptors::calcChi3n,
            (python::arg("mol"), python::arg("force") = false),
            docString.c_str());
python::scope().attr("_CalcChi3n_version") = RDKit::Descriptors::chi3nVersion;
docString =
    "Similar to ChiXv, but uses nVal instead of valence. This makes a "
    "big difference after we get out of the first row.";
python::def("CalcChi4n", RDKit::Descriptors::calcChi4n,
            (python::arg("mol"), python::arg("force") = false),
            docString.c_str());
python::scope().attr("_CalcChi4n_version") = RDKit::Descriptors::chi4nVersion;
docString = "From equation (58) of Rev. Comp. Chem. vol 2, 367-422, (1991)";
python::def(
    "CalcHallKierAlpha", hkAlphaHelper,
    (python::arg("mol"), python::arg("atomContribs") = python::object()),
    docString.c_str());
python::scope().attr("_CalcHallKierAlpha_version") =
    RDKit::Descriptors::hallKierAlphaVersion;
docString =
    "From equations (58) and (59) of Rev. Comp. Chem. vol 2, 367-422, (1991)";
python::def("CalcKappa1", RDKit::Descriptors::calcKappa1,
            (python::arg("mol")), docString.c_str());
python::scope().attr("_CalcKappa1_version") =
    RDKit::Descriptors::kappa1Version;
docString =
    "From equations (58) and (60) of Rev. Comp. Chem. vol 2, 367-422, (1991)";
python::def("CalcKappa2", RDKit::Descriptors::calcKappa2,
            (python::arg("mol")), docString.c_str());
python::scope().attr("_CalcKappa2_version") =
    RDKit::Descriptors::kappa2Version;
docString =
    "From equations (58), (61) and (62) of Rev. Comp. Chem. vol 2, 367-422, "
    "(1991)";
python::def("CalcKappa3", RDKit::Descriptors::calcKappa3,
            (python::arg("mol")), docString.c_str());
python::scope().attr("_CalcKappa3_version") =
    RDKit::Descriptors::kappa3Version;
docString = "Returns the MACCS keys for a molecule as an ExplicitBitVect";
python::def("GetMACCSKeysFingerprint",
RDKit::MACCSFingerprints::getFingerprintAsBitVect,
(python::arg("mol")), docString.c_str(),
python::return_value_policy<python::manage_new_object>());
python::scope().attr("_CalcNumSpiroAtoms_version") =
RDKit::Descriptors::NumSpiroAtomsVersion;
docString =
"Returns the number of spiro atoms (atoms shared between rings that "
"share exactly one atom)";
python::def("CalcNumSpiroAtoms", numSpiroAtoms,
(python::arg("mol"), python::arg("atoms") = python::object()),
docString.c_str());
python::scope().attr("_CalcNumBridgeheadAtoms_version") =
RDKit::Descriptors::NumBridgeheadAtomsVersion;
docString =
"Returns the number of bridgehead atoms (atoms shared between rings that "
"share at least two bonds)";
python::def("CalcNumBridgeheadAtoms", numBridgeheadAtoms,
(python::arg("mol"), python::arg("atoms") = python::object()),
docString.c_str());
python::scope().attr("_CalcNumAtomStereoCenters_version") =
RDKit::Descriptors::NumAtomStereoCentersVersion;
docString =
"Returns the total number of atomic stereocenters (specified and "
"unspecified)";
python::def("CalcNumAtomStereoCenters",
RDKit::Descriptors::numAtomStereoCenters, (python::arg("mol")),
docString.c_str());
python::scope().attr("_CalcNumUnspecifiedAtomStereoCenters_version") =
RDKit::Descriptors::NumUnspecifiedAtomStereoCentersVersion;
docString = "Returns the number of unspecified atomic stereocenters";
python::def("CalcNumUnspecifiedAtomStereoCenters",
RDKit::Descriptors::numUnspecifiedAtomStereoCenters,
(python::arg("mol")), docString.c_str());
// Property registry machinery: PropertyFunctor wraps a single computed
// property; Properties is a registry that can compute a chosen subset of
// them and optionally annotate the molecule with the results.
docString =
    "Property computation class stored in the property registry.\n"
    "See rdkit.Chem.rdMolDescriptor.Properties.GetProperty and \n"
    "rdkit.Chem.Descriptor.Properties.PropertyFunctor for creating new ones";
python::class_<RDKit::Descriptors::PropertyFunctor,
               RDKit::Descriptors::PropertyFunctor *,
               boost::shared_ptr<RDKit::Descriptors::PropertyFunctor>,
               boost::noncopyable>("PropertyFunctor", docString.c_str(),
                                   python::no_init)
    .def("__call__", &RDKit::Descriptors::PropertyFunctor::operator(),
         "Compute the property for the specified molecule")
    .def("GetName", &RDKit::Descriptors::PropertyFunctor::getName,
         "Return the name of the property to calculate")
    .def("GetVersion", &RDKit::Descriptors::PropertyFunctor::getVersion,
         "Return the version of the calculated property");
// Allow python iterables of strings to convert to std::vector<std::string>
// (needed by the Properties constructor below).
iterable_converter().from_python<std::vector<std::string> >();
docString =
    "Property computation and registry system. To compute all registered "
    "properties:\n"
    "mol = Chem.MolFromSmiles('c1ccccc1')\n"
    "properties = rdMolDescriptors.Properties()\n"
    "for name, value in zip(properties.GetPropertyNames(), "
    "properties.ComputeProperties(mol)):\n"
    "  print(name, value)\n\n"
    "To compute a subset\n"
    "properties = rdMolDescriptors.Properties(['exactmw', 'lipinskiHBA'])\n"
    "for name, value in zip(properties.GetPropertyNames(), "
    "properties.ComputeProperties(mol)):\n"
    "  print(name, value)\n\n"
    "";
python::class_<RDKit::Descriptors::Properties,
               RDKit::Descriptors::Properties *>(
    "Properties", docString.c_str(), python::init<>())
    .def(python::init<const std::vector<std::string> &>())
    .def("GetPropertyNames",
         &RDKit::Descriptors::Properties::getPropertyNames,
         "Return the property names computed by this instance")
    .def("ComputeProperties",
         &RDKit::Descriptors::Properties::computeProperties,
         (python::arg("mol"), python::arg("annotateMol") = false),
         "Return a list of computed properties, if annotateMol==True, "
         "annotate the molecule with "
         "the computed properties.")
    .def("AnnotateProperties",
         &RDKit::Descriptors::Properties::annotateProperties,
         python::arg("mol"),
         "Annotate the molecule with the computed properties. These "
         "properties will be available "
         "as SDData or from mol.GetProp(prop)")
    .def("GetAvailableProperties",
         &RDKit::Descriptors::Properties::getAvailableProperties,
         "Return all available property names that can be computed")
    .staticmethod("GetAvailableProperties")
    .def("GetProperty", &RDKit::Descriptors::Properties::getProperty,
         python::arg("propName"), "Return the named property if it exists")
    .staticmethod("GetProperty")
    .def("RegisterProperty",
         &RDKit::Descriptors::Properties::registerProperty,
         python::arg("propertyFunctor"),
         "Register a new property object (not thread safe)")
    .staticmethod("RegisterProperty");
// Bridge so PropertyFunctor subclasses can be implemented in Python.
python::class_<PythonPropertyFunctor, boost::noncopyable,
               python::bases<RDKit::Descriptors::PropertyFunctor> >(
    "PythonPropertyFunctor", "",
    python::init<PyObject *, const std::string &, const std::string &>())
    .def("__call__", &PythonPropertyFunctor::operator(),
         "Compute the property for the specified molecule");
docString =
    "Property Range Query for a molecule. Match(mol) -> true if in range";
python::class_<Queries::RangeQuery<double, RDKit::ROMol const &, true>,
               Queries::RangeQuery<double, RDKit::ROMol const &, true> *,
               boost::noncopyable>("PropertyRangeQuery", docString.c_str(),
                                   python::no_init)
    .def("Match",
         &Queries::RangeQuery<double, RDKit::ROMol const &, true>::Match);
docString =
    "Generates a Range property for the specified property, between min and "
    "max\n"
    "query = MakePropertyRangeQuery('exactmw', 0, 500)\n"
    "query.Match( mol )";
python::def("MakePropertyRangeQuery",
            RDKit::Descriptors::makePropertyRangeQuery,
            (python::arg("name"), python::arg("min"), python::arg("max")),
            docString.c_str(),
            python::return_value_policy<python::manage_new_object>());
// 3D descriptors, only available when the toolkit was built with
// RDK_BUILD_DESCRIPTORS3D. All of them take a conformer id (confId = -1
// means "the default conformer").
// FIX: corrected the French spelling "fonction" -> "function" in the RDF
// docstring; everything else is unchanged.
#ifdef RDK_BUILD_DESCRIPTORS3D
python::scope().attr("_CalcWHIM_version") = RDKit::Descriptors::WHIMVersion;
docString = "Returns the WHIM descriptors vector";
python::def("CalcWHIM", calcWHIMs,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("thresh") = 0.001),
            docString.c_str());
python::scope().attr("_CalcGETAWAY_version") =
    RDKit::Descriptors::GETAWAYVersion;
docString = "Returns the GETAWAY descriptors vector";
python::def("CalcGETAWAY", calcGETAWAYs,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("precision") = 2),
            docString.c_str());
python::scope().attr("_CalcRDF_version") = RDKit::Descriptors::RDFVersion;
docString = "Returns radial distribution function descriptors (RDF)";
python::def("CalcRDF", calcRDFs,
            (python::arg("mol"), python::arg("confId") = -1),
            docString.c_str());
python::scope().attr("_CalcMORSE_version") = RDKit::Descriptors::MORSEVersion;
docString =
    "Returns Molecule Representation of Structures based on Electron "
    "diffraction descriptors";
python::def("CalcMORSE", calcMORSEs,
            (python::arg("mol"), python::arg("confId") = -1),
            docString.c_str());
python::scope().attr("_CalcAUTOCORR3D_version") =
    RDKit::Descriptors::AUTOCORR3DVersion;
docString = "Returns 3D Autocorrelation descriptors vector";
python::def("CalcAUTOCORR3D", calcAUTOCORR3Ds,
            (python::arg("mol"), python::arg("confId") = -1),
            docString.c_str());
python::scope().attr("_CalcPBF_version") = RDKit::Descriptors::PBFVersion;
docString =
    "Returns the PBF (plane of best fit) descriptor "
    "(http://dx.doi.org/10.1021/ci300293f)";
python::def("CalcPBF", RDKit::Descriptors::PBF,
            (python::arg("mol"), python::arg("confId") = -1),
            docString.c_str());
python::scope().attr("_CalcNPR1_version") = RDKit::Descriptors::NPR1Version;
docString = "";
python::def("CalcNPR1", RDKit::Descriptors::NPR1,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("useAtomicMasses") = true),
            docString.c_str());
python::scope().attr("_CalcNPR2_version") = RDKit::Descriptors::NPR2Version;
docString = "";
python::def("CalcNPR2", RDKit::Descriptors::NPR2,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("useAtomicMasses") = true),
            docString.c_str());
python::scope().attr("_CalcPMI1_version") = RDKit::Descriptors::PMI1Version;
docString = "";
python::def("CalcPMI1", RDKit::Descriptors::PMI1,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("useAtomicMasses") = true),
            docString.c_str());
python::scope().attr("_CalcPMI2_version") = RDKit::Descriptors::PMI2Version;
docString = "";
python::def("CalcPMI2", RDKit::Descriptors::PMI2,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("useAtomicMasses") = true),
            docString.c_str());
python::scope().attr("_CalcPMI3_version") = RDKit::Descriptors::PMI3Version;
docString = "";
python::def("CalcPMI3", RDKit::Descriptors::PMI3,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("useAtomicMasses") = true),
            docString.c_str());
python::scope().attr("_CalcRadiusOfGyration_version") =
    RDKit::Descriptors::radiusOfGyrationVersion;
docString = "";
python::def("CalcRadiusOfGyration", RDKit::Descriptors::radiusOfGyration,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("useAtomicMasses") = true),
            docString.c_str());
python::scope().attr("_CalcInertialShapeFactor_version") =
    RDKit::Descriptors::inertialShapeFactorVersion;
docString = "";
python::def("CalcInertialShapeFactor",
            RDKit::Descriptors::inertialShapeFactor,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("useAtomicMasses") = true),
            docString.c_str());
python::scope().attr("_CalcEccentricity_version") =
    RDKit::Descriptors::eccentricityVersion;
docString = "";
python::def("CalcEccentricity", RDKit::Descriptors::eccentricity,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("useAtomicMasses") = true),
            docString.c_str());
python::scope().attr("_CalcAsphericity_version") =
    RDKit::Descriptors::asphericityVersion;
docString = "";
python::def("CalcAsphericity", RDKit::Descriptors::asphericity,
            (python::arg("mol"), python::arg("confId") = -1,
             python::arg("useAtomicMasses") = true),
            docString.c_str());
python::scope().attr("_CalcSpherocityIndex_version") =
    RDKit::Descriptors::spherocityIndexVersion;
docString = "";
python::def("CalcSpherocityIndex", RDKit::Descriptors::spherocityIndex,
            (python::arg("mol"), python::arg("confId") = -1),
            docString.c_str());
python::scope().attr("_CalcAUTOCORR2D_version") =
    RDKit::Descriptors::AUTOCORR2DVersion;
docString = "Returns 2D Autocorrelation descriptors vector";
python::def("CalcAUTOCORR2D", calcAUTOCORR2Ds, (python::arg("mol")),
            docString.c_str());
#endif
}
| 1 | 18,020 | This isn't how you throw an exception in C++. I'd suggest you take a look elsewhere in the RDKit C++ codebase and see how ValueError exceptions are thrown. Hint: there's a function called `throw_value_error()` that you may find useful. | rdkit-rdkit | cpp |
@@ -662,7 +662,7 @@ TEST (active_transactions, restart_dropped)
}
// Verify the block was updated in the ledger
{
- auto block (node.store.block_get (node.store.tx_begin_read (), send1->hash ()));
+ auto block (node.store.block_get (node.store.tx_begin_write (), send1->hash ()));
ASSERT_EQ (work2, block->block_work ());
}
// Drop election | 1 | #include <nano/core_test/testutil.hpp>
#include <nano/lib/jsonconfig.hpp>
#include <nano/node/election.hpp>
#include <nano/node/testing.hpp>
#include <gtest/gtest.h>
using namespace std::chrono_literals;
// Confirms a send on node1 before peering, then verifies that node2 learns
// the block via flooding, runs its own election to completion, and records
// it in active.confirmed without any dropped elections.
TEST (active_transactions, confirm_one)
{
	nano::system system;
	nano::node_config node_config (24000, system.logging);
	auto & node1 = *system.add_node (node_config);
	// Send and vote for a block before peering with node2
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, nano::public_key (), node_config.receive_minimum.number ()));
	system.deadline_set (5s);
	while (!node1.active.empty () && !node1.block_confirmed_or_being_confirmed (node1.store.tx_begin_read (), send->hash ()))
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	node_config.peering_port = 24001;
	auto & node2 = *system.add_node (node_config);
	system.deadline_set (5s);
	// Let node2 know about the block
	while (node2.active.empty ())
	{
		node1.network.flood_block (send, false);
		ASSERT_NO_ERROR (system.poll ());
	}
	// Wait for node2's election on the block to finish
	while (!node2.active.empty ())
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (0, node2.active.dropped_elections_cache_size ());
	// Poll under the active-transactions lock until the election shows up in
	// the confirmed set (the lock is released around each poll).
	nano::unique_lock<std::mutex> active_lock (node2.active.mutex);
	while (node2.active.confirmed.empty ())
	{
		active_lock.unlock ();
		ASSERT_NO_ERROR (system.poll ());
		active_lock.lock ();
	}
}
// Verifies that active elections are prioritized by adjusted difficulty:
// blocks that depend on other active blocks (a second send, or an open
// receiving a send) must sort strictly below the block they depend on, both
// for the initial four-election set and for a second generation created
// with differing work multipliers.
TEST (active_transactions, adjusted_difficulty_priority)
{
	nano::system system;
	nano::node_config node_config (nano::get_available_port (), system.logging);
	node_config.enable_voting = false;
	node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
	auto & node1 = *system.add_node (node_config);
	nano::genesis genesis;
	nano::keypair key1, key2, key3;
	auto send1 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - 10 * nano::xrb_ratio, key1.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (genesis.hash ())));
	auto send2 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send1->hash (), nano::test_genesis_key.pub, nano::genesis_amount - 20 * nano::xrb_ratio, key2.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send1->hash ())));
	auto open1 (std::make_shared<nano::state_block> (key1.pub, 0, key1.pub, 10 * nano::xrb_ratio, send1->hash (), key1.prv, key1.pub, *system.work.generate (key1.pub)));
	auto open2 (std::make_shared<nano::state_block> (key2.pub, 0, key2.pub, 10 * nano::xrb_ratio, send2->hash (), key2.prv, key2.pub, *system.work.generate (key2.pub)));
	node1.process_active (send1); // genesis
	node1.process_active (send2); // genesis
	node1.process_active (open1); // key1
	node1.process_active (open2); // key2
	system.deadline_set (10s);
	while (node1.active.size () != 4)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// Check adjusted difficulty
	{
		nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
		ASSERT_EQ (node1.active.roots.get<1> ().begin ()->election->status.winner->hash (), send1->hash ());
		ASSERT_LT (node1.active.roots.find (send2->qualified_root ())->adjusted_difficulty, node1.active.roots.find (send1->qualified_root ())->adjusted_difficulty);
		ASSERT_LT (node1.active.roots.find (open1->qualified_root ())->adjusted_difficulty, node1.active.roots.find (send1->qualified_root ())->adjusted_difficulty);
		ASSERT_LT (node1.active.roots.find (open2->qualified_root ())->adjusted_difficulty, node1.active.roots.find (send2->qualified_root ())->adjusted_difficulty);
	}
	// Confirm elections (confirm_once mutates roots, so restart iteration
	// from the beginning after each call)
	while (node1.active.size () != 0)
	{
		nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
		auto it (node1.active.roots.begin ());
		while (!node1.active.roots.empty () && it != node1.active.roots.end ())
		{
			auto election (it->election);
			election->confirm_once ();
			it = node1.active.roots.begin ();
		}
	}
	{
		system.deadline_set (10s);
		nano::unique_lock<std::mutex> active_lock (node1.active.mutex);
		while (node1.active.confirmed.size () != 4)
		{
			active_lock.unlock ();
			ASSERT_NO_ERROR (system.poll ());
			active_lock.lock ();
		}
	}
	//genesis and key1,key2 are opened
	//start chain of 2 on each
	auto send3 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send2->hash (), nano::test_genesis_key.pub, 9 * nano::xrb_ratio, key3.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send2->hash (), nano::difficulty::from_multiplier (1500, node1.network_params.network.publish_threshold))));
	auto send4 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send3->hash (), nano::test_genesis_key.pub, 8 * nano::xrb_ratio, key3.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send3->hash (), nano::difficulty::from_multiplier (1500, node1.network_params.network.publish_threshold))));
	auto send5 (std::make_shared<nano::state_block> (key1.pub, open1->hash (), key1.pub, 9 * nano::xrb_ratio, key3.pub, key1.prv, key1.pub, *system.work.generate (open1->hash (), nano::difficulty::from_multiplier (100, node1.network_params.network.publish_threshold))));
	auto send6 (std::make_shared<nano::state_block> (key1.pub, send5->hash (), key1.pub, 8 * nano::xrb_ratio, key3.pub, key1.prv, key1.pub, *system.work.generate (send5->hash (), nano::difficulty::from_multiplier (100, node1.network_params.network.publish_threshold))));
	auto send7 (std::make_shared<nano::state_block> (key2.pub, open2->hash (), key2.pub, 9 * nano::xrb_ratio, key3.pub, key2.prv, key2.pub, *system.work.generate (open2->hash (), nano::difficulty::from_multiplier (500, node1.network_params.network.publish_threshold))));
	auto send8 (std::make_shared<nano::state_block> (key2.pub, send7->hash (), key2.pub, 8 * nano::xrb_ratio, key3.pub, key2.prv, key2.pub, *system.work.generate (send7->hash (), nano::difficulty::from_multiplier (500, node1.network_params.network.publish_threshold))));
	node1.process_active (send3); // genesis
	node1.process_active (send5); // key1
	node1.process_active (send7); // key2
	node1.process_active (send4); // genesis
	node1.process_active (send6); // key1
	node1.process_active (send8); // key2
	system.deadline_set (10s);
	while (node1.active.size () != 6)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// Check adjusted difficulty: iterating the difficulty-ordered index must
	// yield strictly decreasing values, and each chain's second block must
	// rank below its first
	nano::lock_guard<std::mutex> lock (node1.active.mutex);
	uint64_t last_adjusted (0);
	for (auto i (node1.active.roots.get<1> ().begin ()), n (node1.active.roots.get<1> ().end ()); i != n; ++i)
	{
		//first root has nothing to compare
		if (last_adjusted != 0)
		{
			ASSERT_LT (i->adjusted_difficulty, last_adjusted);
		}
		last_adjusted = i->adjusted_difficulty;
	}
	ASSERT_LT (node1.active.roots.find (send4->qualified_root ())->adjusted_difficulty, node1.active.roots.find (send3->qualified_root ())->adjusted_difficulty);
	ASSERT_LT (node1.active.roots.find (send6->qualified_root ())->adjusted_difficulty, node1.active.roots.find (send5->qualified_root ())->adjusted_difficulty);
	ASSERT_LT (node1.active.roots.find (send8->qualified_root ())->adjusted_difficulty, node1.active.roots.find (send7->qualified_root ())->adjusted_difficulty);
}
// Forces every active root's difficulty to uint64 max and checks that
// adjust_difficulty degrades dependent roots without wrapping: the chain
// head (send1) keeps the maximum while each dependent root ends up
// strictly lower.
TEST (active_transactions, adjusted_difficulty_overflow_max)
{
	nano::system system;
	nano::node_config node_config (nano::get_available_port (), system.logging);
	node_config.enable_voting = false;
	node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
	auto & node1 = *system.add_node (node_config);
	nano::genesis genesis;
	nano::keypair key1, key2;
	auto send1 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - 10 * nano::xrb_ratio, key1.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (genesis.hash ())));
	auto send2 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send1->hash (), nano::test_genesis_key.pub, nano::genesis_amount - 20 * nano::xrb_ratio, key2.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send1->hash ())));
	auto open1 (std::make_shared<nano::state_block> (key1.pub, 0, key1.pub, 10 * nano::xrb_ratio, send1->hash (), key1.prv, key1.pub, *system.work.generate (key1.pub)));
	auto open2 (std::make_shared<nano::state_block> (key2.pub, 0, key2.pub, 10 * nano::xrb_ratio, send2->hash (), key2.prv, key2.pub, *system.work.generate (key2.pub)));
	node1.process_active (send1); // genesis
	node1.process_active (send2); // genesis
	node1.process_active (open1); // key1
	node1.process_active (open2); // key2
	system.deadline_set (10s);
	while (node1.active.size () != 4)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	{
		nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
		// Update difficulty to maximum
		auto send1_root (node1.active.roots.find (send1->qualified_root ()));
		auto send2_root (node1.active.roots.find (send2->qualified_root ()));
		auto open1_root (node1.active.roots.find (open1->qualified_root ()));
		auto open2_root (node1.active.roots.find (open2->qualified_root ()));
		// clang-format off
		auto modify_difficulty = [& roots = node1.active.roots](auto & existing_root) {
			roots.modify (existing_root, [](nano::conflict_info & info_a) {
				info_a.difficulty = std::numeric_limits<std::uint64_t>::max ();
			});
		};
		// clang-format on
		modify_difficulty (send1_root);
		modify_difficulty (send2_root);
		modify_difficulty (open1_root);
		modify_difficulty (open2_root);
		node1.active.adjust_difficulty (send2->hash ());
		// Test overflow
		ASSERT_EQ (node1.active.roots.get<1> ().begin ()->election->status.winner->hash (), send1->hash ());
		ASSERT_EQ (send1_root->adjusted_difficulty, std::numeric_limits<std::uint64_t>::max ());
		ASSERT_LT (send2_root->adjusted_difficulty, send1_root->adjusted_difficulty);
		ASSERT_LT (open1_root->adjusted_difficulty, send1_root->adjusted_difficulty);
		ASSERT_LT (open2_root->adjusted_difficulty, send2_root->adjusted_difficulty);
	}
}
TEST (active_transactions, adjusted_difficulty_overflow_min)
{
nano::system system;
nano::node_config node_config (nano::get_available_port (), system.logging);
node_config.enable_voting = false;
node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
auto & node1 = *system.add_node (node_config);
nano::genesis genesis;
nano::keypair key1, key2, key3;
auto send1 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - 10 * nano::xrb_ratio, key1.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (genesis.hash ())));
auto send2 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send1->hash (), nano::test_genesis_key.pub, nano::genesis_amount - 20 * nano::xrb_ratio, key2.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send1->hash ())));
auto open1 (std::make_shared<nano::state_block> (key1.pub, 0, key1.pub, 10 * nano::xrb_ratio, send1->hash (), key1.prv, key1.pub, *system.work.generate (key1.pub)));
auto open2 (std::make_shared<nano::state_block> (key2.pub, 0, key2.pub, 10 * nano::xrb_ratio, send2->hash (), key2.prv, key2.pub, *system.work.generate (key2.pub)));
auto send3 (std::make_shared<nano::state_block> (key2.pub, open2->hash (), key2.pub, 9 * nano::xrb_ratio, key3.pub, key2.prv, key2.pub, *system.work.generate (open2->hash ())));
node1.process_active (send1); // genesis
node1.process_active (send2); // genesis
node1.process_active (open1); // key1
node1.process_active (open2); // key2
node1.process_active (send3); // key2
system.deadline_set (10s);
while (node1.active.size () != 5)
{
ASSERT_NO_ERROR (system.poll ());
}
{
nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
// Update difficulty to minimum
auto send1_root (node1.active.roots.find (send1->qualified_root ()));
auto send2_root (node1.active.roots.find (send2->qualified_root ()));
auto open1_root (node1.active.roots.find (open1->qualified_root ()));
auto open2_root (node1.active.roots.find (open2->qualified_root ()));
auto send3_root (node1.active.roots.find (send3->qualified_root ()));
// clang-format off
auto modify_difficulty = [& roots = node1.active.roots](auto & existing_root) {
roots.modify (existing_root, [](nano::conflict_info & info_a) {
info_a.difficulty = std::numeric_limits<std::uint64_t>::min () + 1;
});
};
// clang-format on
modify_difficulty (send1_root);
modify_difficulty (send2_root);
modify_difficulty (open1_root);
modify_difficulty (open2_root);
modify_difficulty (send3_root);
node1.active.adjust_difficulty (send1->hash ());
// Test overflow
ASSERT_EQ (node1.active.roots.get<1> ().begin ()->election->status.winner->hash (), send1->hash ());
ASSERT_EQ (send1_root->adjusted_difficulty, std::numeric_limits<std::uint64_t>::min () + 3);
ASSERT_LT (send2_root->adjusted_difficulty, send1_root->adjusted_difficulty);
ASSERT_LT (open1_root->adjusted_difficulty, send1_root->adjusted_difficulty);
ASSERT_LT (open2_root->adjusted_difficulty, send2_root->adjusted_difficulty);
ASSERT_LT (send3_root->adjusted_difficulty, open2_root->adjusted_difficulty);
ASSERT_EQ (send3_root->adjusted_difficulty, std::numeric_limits<std::uint64_t>::min ());
// Clear roots with too low difficulty to prevent issues
node1.active.roots.clear ();
}
}
TEST (active_transactions, keep_local)
{
nano::system system;
nano::node_config node_config (nano::get_available_port (), system.logging);
node_config.enable_voting = false;
node_config.active_elections_size = 2; //bound to 2, wont drop wallet created transactions, but good to test dropping remote
// Disable frontier confirmation to allow the test to finish before
node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
auto & node = *system.add_node (node_config);
auto & wallet (*system.wallet (0));
nano::genesis genesis;
//key 1/2 will be managed by the wallet
nano::keypair key1, key2, key3, key4, key5, key6;
wallet.insert_adhoc (nano::test_genesis_key.prv);
auto send1 (wallet.send_action (nano::test_genesis_key.pub, key1.pub, node.config.receive_minimum.number ()));
auto send2 (wallet.send_action (nano::test_genesis_key.pub, key2.pub, node.config.receive_minimum.number ()));
auto send3 (wallet.send_action (nano::test_genesis_key.pub, key3.pub, node.config.receive_minimum.number ()));
auto send4 (wallet.send_action (nano::test_genesis_key.pub, key4.pub, node.config.receive_minimum.number ()));
auto send5 (wallet.send_action (nano::test_genesis_key.pub, key5.pub, node.config.receive_minimum.number ()));
auto send6 (wallet.send_action (nano::test_genesis_key.pub, key6.pub, node.config.receive_minimum.number ()));
system.deadline_set (10s);
// should not drop wallet created transactions
while (node.active.size () != 6)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (0, node.active.dropped_elections_cache_size ());
while (!node.active.empty ())
{
nano::lock_guard<std::mutex> active_guard (node.active.mutex);
auto it (node.active.roots.begin ());
while (!node.active.roots.empty () && it != node.active.roots.end ())
{
(it->election)->confirm_once ();
it = node.active.roots.begin ();
}
}
auto open1 (std::make_shared<nano::state_block> (key1.pub, 0, key1.pub, node.config.receive_minimum.number (), send1->hash (), key1.prv, key1.pub, *system.work.generate (key1.pub)));
node.process_active (open1);
node.active.start (open1);
auto open2 (std::make_shared<nano::state_block> (key2.pub, 0, key2.pub, node.config.receive_minimum.number (), send2->hash (), key2.prv, key2.pub, *system.work.generate (key2.pub)));
node.process_active (open2);
node.active.start (open2);
auto open3 (std::make_shared<nano::state_block> (key3.pub, 0, key3.pub, node.config.receive_minimum.number (), send3->hash (), key3.prv, key3.pub, *system.work.generate (key3.pub)));
node.process_active (open3);
node.active.start (open3);
ASSERT_EQ (3, node.active.size ());
system.deadline_set (10s);
// bound elections, should drop after one loop
while (node.active.size () != node_config.active_elections_size)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (1, node.active.dropped_elections_cache_size ());
}
TEST (active_transactions, prioritize_chains)
{
nano::system system;
nano::node_config node_config (nano::get_available_port (), system.logging);
node_config.enable_voting = false;
node_config.active_elections_size = 4; //bound to 4, wont drop wallet created transactions, but good to test dropping remote
// Disable frontier confirmation to allow the test to finish before
node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
auto & node1 = *system.add_node (node_config);
nano::genesis genesis;
nano::keypair key1, key2, key3;
auto send1 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - 10 * nano::xrb_ratio, key1.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (genesis.hash ())));
auto open1 (std::make_shared<nano::state_block> (key1.pub, 0, key1.pub, 10 * nano::xrb_ratio, send1->hash (), key1.prv, key1.pub, *system.work.generate (key1.pub)));
auto send2 (std::make_shared<nano::state_block> (key1.pub, open1->hash (), key1.pub, nano::xrb_ratio * 9, key2.pub, key1.prv, key1.pub, *system.work.generate (open1->hash ())));
auto send3 (std::make_shared<nano::state_block> (key1.pub, send2->hash (), key1.pub, nano::xrb_ratio * 8, key2.pub, key1.prv, key1.pub, *system.work.generate (send2->hash ())));
auto send4 (std::make_shared<nano::state_block> (key1.pub, send3->hash (), key1.pub, nano::xrb_ratio * 7, key2.pub, key1.prv, key1.pub, *system.work.generate (send3->hash ())));
auto send5 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send1->hash (), nano::test_genesis_key.pub, nano::genesis_amount - 20 * nano::xrb_ratio, key2.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send1->hash ())));
auto send6 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send5->hash (), nano::test_genesis_key.pub, nano::genesis_amount - 30 * nano::xrb_ratio, key3.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send5->hash ())));
auto open2 (std::make_shared<nano::state_block> (key2.pub, 0, key2.pub, 10 * nano::xrb_ratio, send5->hash (), key2.prv, key2.pub, *system.work.generate (key2.pub, nano::difficulty::from_multiplier (50., node1.network_params.network.publish_threshold))));
uint64_t difficulty1 (0);
nano::work_validate (*open2, &difficulty1);
uint64_t difficulty2 (0);
nano::work_validate (*send6, &difficulty2);
node1.process_active (send1);
node1.process_active (open1);
node1.process_active (send5);
system.deadline_set (10s);
while (node1.active.size () != 3)
{
ASSERT_NO_ERROR (system.poll ());
}
while (node1.active.size () != 0)
{
nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
auto it (node1.active.roots.get<1> ().begin ());
while (!node1.active.roots.empty () && it != node1.active.roots.get<1> ().end ())
{
auto election (it->election);
election->confirm_once ();
it = node1.active.roots.get<1> ().begin ();
}
}
node1.process_active (send2);
node1.process_active (send3);
node1.process_active (send4);
node1.process_active (send6);
system.deadline_set (10s);
while (node1.active.size () != 4)
{
ASSERT_NO_ERROR (system.poll ());
}
system.deadline_set (10s);
std::this_thread::sleep_for (1s);
node1.process_active (open2);
system.deadline_set (10s);
while (node1.active.size () != 4)
{
ASSERT_NO_ERROR (system.poll ());
}
size_t seen (0);
{
auto it (node1.active.roots.get<1> ().begin ());
while (!node1.active.roots.empty () && it != node1.active.roots.get<1> ().end ())
{
if (it->difficulty == (difficulty1 || difficulty2))
{
seen++;
}
it++;
}
}
ASSERT_LT (seen, 2);
ASSERT_EQ (node1.active.size (), 4);
}
TEST (active_transactions, inactive_votes_cache)
{
nano::system system (1);
nano::block_hash latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
nano::keypair key;
auto send (std::make_shared<nano::send_block> (latest, key.pub, nano::genesis_amount - 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (latest)));
auto vote (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 0, std::vector<nano::block_hash> (1, send->hash ())));
system.nodes[0]->vote_processor.vote (vote, std::make_shared<nano::transport::channel_udp> (system.nodes[0]->network.udp_channels, system.nodes[0]->network.endpoint (), system.nodes[0]->network_params.protocol.protocol_version));
system.deadline_set (5s);
while (system.nodes[0]->active.inactive_votes_cache_size () != 1)
{
ASSERT_NO_ERROR (system.poll ());
}
system.nodes[0]->process_active (send);
system.nodes[0]->block_processor.flush ();
bool confirmed (false);
system.deadline_set (5s);
while (!confirmed)
{
auto transaction (system.nodes[0]->store.tx_begin_read ());
confirmed = system.nodes[0]->ledger.block_confirmed (transaction, send->hash ());
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (1, system.nodes[0]->stats.count (nano::stat::type::election, nano::stat::detail::vote_cached));
}
TEST (active_transactions, inactive_votes_cache_fork)
{
nano::system system (1);
nano::block_hash latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
nano::keypair key;
auto send1 (std::make_shared<nano::send_block> (latest, key.pub, nano::genesis_amount - 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (latest)));
auto send2 (std::make_shared<nano::send_block> (latest, key.pub, nano::genesis_amount - 200, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (latest)));
auto vote (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 0, std::vector<nano::block_hash> (1, send1->hash ())));
system.nodes[0]->vote_processor.vote (vote, std::make_shared<nano::transport::channel_udp> (system.nodes[0]->network.udp_channels, system.nodes[0]->network.endpoint (), system.nodes[0]->network_params.protocol.protocol_version));
auto channel1 (system.nodes [0]->network.udp_channels.create (system.nodes [0]->network.endpoint ()));
system.deadline_set (5s);
while (system.nodes[0]->active.inactive_votes_cache_size () != 1)
{
ASSERT_NO_ERROR (system.poll ());
}
system.nodes[0]->network.process_message (nano::publish (send2), channel1);
system.nodes[0]->block_processor.flush ();
ASSERT_NE (nullptr, system.nodes[0]->block (send2->hash ()));
system.nodes[0]->network.process_message (nano::publish (send1), channel1);
system.nodes[0]->block_processor.flush ();
bool confirmed (false);
system.deadline_set (5s);
while (!confirmed)
{
auto transaction (system.nodes[0]->store.tx_begin_read ());
confirmed = system.nodes[0]->block (send1->hash ()) != nullptr && system.nodes[0]->ledger.block_confirmed (transaction, send1->hash ());
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (1, system.nodes[0]->stats.count (nano::stat::type::election, nano::stat::detail::vote_cached));
}
TEST (active_transactions, inactive_votes_cache_existing_vote)
{
nano::system system;
nano::node_config node_config (nano::get_available_port (), system.logging);
node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
auto node = system.add_node (node_config);
nano::block_hash latest (node->latest (nano::test_genesis_key.pub));
nano::keypair key;
auto send (std::make_shared<nano::send_block> (latest, key.pub, nano::genesis_amount - 100 * nano::Gxrb_ratio, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (latest)));
auto open (std::make_shared<nano::state_block> (key.pub, 0, key.pub, 100 * nano::Gxrb_ratio, send->hash (), key.prv, key.pub, *system.work.generate (key.pub))); // Increase key weight
node->process_active (send);
node->block_processor.add (open);
node->block_processor.flush ();
system.deadline_set (5s);
while (node->active.size () != 1)
{
ASSERT_NO_ERROR (system.poll ());
}
std::shared_ptr<nano::election> election;
{
nano::lock_guard<std::mutex> active_guard (node->active.mutex);
auto it (node->active.roots.begin ());
ASSERT_NE (node->active.roots.end (), it);
election = it->election;
}
ASSERT_GT (node->weight (key.pub), node->minimum_principal_weight ());
// Insert vote
auto vote1 (std::make_shared<nano::vote> (key.pub, key.prv, 1, std::vector<nano::block_hash> (1, send->hash ())));
node->vote_processor.vote (vote1, std::make_shared<nano::transport::channel_udp> (system.nodes[0]->network.udp_channels, system.nodes[0]->network.endpoint (), system.nodes[0]->network_params.protocol.protocol_version));
system.deadline_set (5s);
bool done (false);
while (!done)
{
nano::unique_lock<std::mutex> active_lock (node->active.mutex);
done = (election->last_votes.size () == 2);
active_lock.unlock ();
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (1, system.nodes[0]->stats.count (nano::stat::type::election, nano::stat::detail::vote_new));
nano::lock_guard<std::mutex> active_guard (node->active.mutex);
auto last_vote1 (election->last_votes[key.pub]);
ASSERT_EQ (send->hash (), last_vote1.hash);
ASSERT_EQ (1, last_vote1.sequence);
// Attempt to change vote with inactive_votes_cache
node->active.add_inactive_votes_cache (send->hash (), key.pub);
ASSERT_EQ (1, node->active.find_inactive_votes_cache (send->hash ()).voters.size ());
election->insert_inactive_votes_cache (send->hash ());
// Check that election data is not changed
ASSERT_EQ (2, election->last_votes.size ());
auto last_vote2 (election->last_votes[key.pub]);
ASSERT_EQ (last_vote1.hash, last_vote2.hash);
ASSERT_EQ (last_vote1.sequence, last_vote2.sequence);
ASSERT_EQ (last_vote1.time, last_vote2.time);
ASSERT_EQ (0, system.nodes[0]->stats.count (nano::stat::type::election, nano::stat::detail::vote_cached));
}
TEST (active_transactions, inactive_votes_cache_multiple_votes)
{
nano::system system;
nano::node_config node_config (nano::get_available_port (), system.logging);
node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
auto node = system.add_node (node_config);
nano::block_hash latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
nano::keypair key1;
auto send1 (std::make_shared<nano::send_block> (latest, key1.pub, nano::genesis_amount - 100 * nano::Gxrb_ratio, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (latest)));
auto send2 (std::make_shared<nano::send_block> (send1->hash (), key1.pub, 100 * nano::Gxrb_ratio, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send1->hash ()))); // Decrease genesis weight to prevent election confirmation
auto open (std::make_shared<nano::state_block> (key1.pub, 0, key1.pub, 100 * nano::Gxrb_ratio, send1->hash (), key1.prv, key1.pub, *system.work.generate (key1.pub))); // Increase key1 weight
node->block_processor.add (send1);
node->block_processor.add (send2);
node->block_processor.add (open);
node->block_processor.flush ();
// Process votes
auto vote1 (std::make_shared<nano::vote> (key1.pub, key1.prv, 0, std::vector<nano::block_hash> (1, send1->hash ())));
system.nodes[0]->vote_processor.vote (vote1, std::make_shared<nano::transport::channel_udp> (system.nodes[0]->network.udp_channels, system.nodes[0]->network.endpoint (), system.nodes[0]->network_params.protocol.protocol_version));
auto vote2 (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 0, std::vector<nano::block_hash> (1, send1->hash ())));
system.nodes[0]->vote_processor.vote (vote2, std::make_shared<nano::transport::channel_udp> (system.nodes[0]->network.udp_channels, system.nodes[0]->network.endpoint (), system.nodes[0]->network_params.protocol.protocol_version));
system.deadline_set (5s);
while (true)
{
{
nano::lock_guard<std::mutex> active_guard (system.nodes[0]->active.mutex);
if (system.nodes[0]->active.find_inactive_votes_cache (send1->hash ()).voters.size () == 2)
{
break;
}
}
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (1, system.nodes[0]->active.inactive_votes_cache_size ());
// Start election
system.nodes[0]->active.start (send1);
{
nano::lock_guard<std::mutex> active_guard (system.nodes[0]->active.mutex);
auto it (system.nodes[0]->active.roots.begin ());
ASSERT_NE (system.nodes[0]->active.roots.end (), it);
ASSERT_EQ (3, it->election->last_votes.size ()); // 2 votes and 1 default not_an_acount
}
ASSERT_EQ (2, system.nodes[0]->stats.count (nano::stat::type::election, nano::stat::detail::vote_cached));
}
TEST (active_transactions, update_difficulty)
{
nano::system system (2);
auto & node1 = *system.nodes[0];
auto & node2 = *system.nodes[1];
nano::genesis genesis;
nano::keypair key1;
// Generate blocks & start elections
auto send1 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - 100, key1.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (genesis.hash ())));
uint64_t difficulty1 (0);
nano::work_validate (*send1, &difficulty1);
auto send2 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send1->hash (), nano::test_genesis_key.pub, nano::genesis_amount - 200, key1.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send1->hash ())));
uint64_t difficulty2 (0);
nano::work_validate (*send2, &difficulty2);
node1.process_active (send1);
node1.process_active (send2);
node1.block_processor.flush ();
system.deadline_set (10s);
while (node1.active.size () != 2 || node2.active.size () != 2)
{
ASSERT_NO_ERROR (system.poll ());
}
// Update work with higher difficulty
auto work1 = node1.work_generate_blocking (send1->root (), difficulty1 + 1, boost::none);
auto work2 = node1.work_generate_blocking (send2->root (), difficulty2 + 1, boost::none);
std::error_code ec;
nano::state_block_builder builder;
send1 = std::shared_ptr<nano::state_block> (builder.from (*send1).work (*work1).build (ec));
nano::state_block_builder builder1;
send2 = std::shared_ptr<nano::state_block> (builder1.from (*send2).work (*work2).build (ec));
ASSERT_FALSE (ec);
auto modify_election = [&node1](auto block) {
auto hash (block->hash ());
nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
auto existing (node1.active.roots.find (block->qualified_root ()));
ASSERT_NE (existing, node1.active.roots.end ());
auto election (existing->election);
ASSERT_EQ (election->status.winner->hash (), hash);
election->status.winner = block;
auto current (election->blocks.find (hash));
assert (current != election->blocks.end ());
current->second = block;
};
modify_election (send1);
modify_election (send2);
node1.process_active (send1);
node1.process_active (send2);
node1.block_processor.flush ();
system.deadline_set (10s);
bool done (false);
while (!done)
{
{
// node1
nano::lock_guard<std::mutex> guard1 (node1.active.mutex);
auto const existing1 (node1.active.roots.find (send1->qualified_root ()));
ASSERT_NE (existing1, node1.active.roots.end ());
auto const existing2 (node1.active.roots.find (send2->qualified_root ()));
ASSERT_NE (existing2, node1.active.roots.end ());
// node2
nano::lock_guard<std::mutex> guard2 (node2.active.mutex);
auto const existing3 (node2.active.roots.find (send1->qualified_root ()));
ASSERT_NE (existing3, node2.active.roots.end ());
auto const existing4 (node2.active.roots.find (send2->qualified_root ()));
ASSERT_NE (existing4, node2.active.roots.end ());
auto updated = (existing1->difficulty > difficulty1) && (existing2->difficulty > difficulty2);
auto propogated = (existing3->difficulty > difficulty1) && (existing4->difficulty > difficulty2);
done = updated && propogated;
}
ASSERT_NO_ERROR (system.poll ());
}
}
TEST (active_transactions, restart_dropped)
{
nano::system system;
nano::node_config node_config (nano::get_available_port (), system.logging);
node_config.enable_voting = false;
node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
auto & node = *system.add_node (node_config);
nano::genesis genesis;
auto send1 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - nano::xrb_ratio, nano::test_genesis_key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (genesis.hash ())));
// Process only in ledger and emulate dropping the election
ASSERT_EQ (nano::process_result::progress, node.process (*send1).code);
{
nano::lock_guard<std::mutex> guard (node.active.mutex);
node.active.add_dropped_elections_cache (send1->qualified_root ());
}
uint64_t difficulty1 (0);
nano::work_validate (*send1, &difficulty1);
// Generate higher difficulty work
auto work2 (*system.work.generate (send1->root (), difficulty1));
uint64_t difficulty2 (0);
nano::work_validate (send1->root (), work2, &difficulty2);
ASSERT_GT (difficulty2, difficulty1);
// Process the same block with updated work
auto send2 (std::make_shared<nano::state_block> (*send1));
send2->block_work_set (work2);
node.process_active (send2);
// Wait until the block is in elections
system.deadline_set (5s);
bool done{ false };
while (!done)
{
{
nano::lock_guard<std::mutex> guard (node.active.mutex);
auto existing (node.active.roots.find (send2->qualified_root ()));
done = existing != node.active.roots.end ();
if (done)
{
ASSERT_EQ (difficulty2, existing->difficulty);
}
}
ASSERT_NO_ERROR (system.poll ());
}
// Verify the block was updated in the ledger
{
auto block (node.store.block_get (node.store.tx_begin_read (), send1->hash ()));
ASSERT_EQ (work2, block->block_work ());
}
// Drop election
node.active.erase (*send2);
// Try to restart election with the lower difficulty block, should not work since the block as lower work
node.process_active (send1);
system.deadline_set (5s);
while (node.block_processor.size () > 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_TRUE (node.active.empty ());
// Verify the block was not updated in the ledger
{
auto block (node.store.block_get (node.store.tx_begin_read (), send1->hash ()));
ASSERT_EQ (work2, block->block_work ());
}
}
TEST (active_transactions, vote_replays)
{
nano::system system;
nano::node_config node_config (nano::get_available_port (), system.logging);
node_config.enable_voting = false;
auto & node = *system.add_node (node_config);
nano::genesis genesis;
nano::keypair key;
std::error_code ec;
auto send1 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - nano::Gxrb_ratio, key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (genesis.hash ())));
ASSERT_NE (nullptr, send1);
auto open1 (std::make_shared<nano::state_block> (key.pub, 0, key.pub, nano::Gxrb_ratio, send1->hash (), key.prv, key.pub, *system.work.generate (key.pub)));
ASSERT_NE (nullptr, open1);
node.process_active (send1);
node.process_active (open1);
node.block_processor.flush ();
ASSERT_EQ (2, node.active.size ());
// First vote is not a replay and confirms the election, second vote should be indeterminate since the election no longer exists
auto vote_send1 (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 0, send1));
ASSERT_EQ (nano::vote_code::vote, node.active.vote (vote_send1));
ASSERT_EQ (1, node.active.size ());
ASSERT_EQ (nano::vote_code::indeterminate, node.active.vote (vote_send1));
// Open new account
auto vote_open1 (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 0, open1));
ASSERT_EQ (nano::vote_code::vote, node.active.vote (vote_open1));
ASSERT_TRUE (node.active.empty ());
ASSERT_EQ (nano::vote_code::indeterminate, node.active.vote (vote_open1));
ASSERT_EQ (nano::Gxrb_ratio, node.ledger.weight (key.pub));
auto send2 (std::make_shared<nano::state_block> (key.pub, open1->hash (), key.pub, nano::Gxrb_ratio - 1, key.pub, key.prv, key.pub, *system.work.generate (open1->hash ())));
ASSERT_NE (nullptr, send2);
node.process_active (send2);
node.block_processor.flush ();
ASSERT_EQ (1, node.active.size ());
auto vote1_send2 (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 0, send2));
auto vote2_send2 (std::make_shared<nano::vote> (key.pub, key.prv, 0, send2));
ASSERT_EQ (nano::vote_code::vote, node.active.vote (vote2_send2));
ASSERT_EQ (1, node.active.size ());
ASSERT_EQ (nano::vote_code::replay, node.active.vote (vote2_send2));
ASSERT_EQ (1, node.active.size ());
ASSERT_EQ (nano::vote_code::vote, node.active.vote (vote1_send2));
ASSERT_EQ (0, node.active.size ());
ASSERT_EQ (nano::vote_code::indeterminate, node.active.vote (vote1_send2));
ASSERT_EQ (nano::vote_code::indeterminate, node.active.vote (vote2_send2));
}
| 1 | 16,111 | Was changing this to a write transaction required? | nanocurrency-nano-node | cpp |
@@ -21,7 +21,8 @@ type Predicate struct {
Method string `json:"method"`
// Params are the parameters (or a subset of the parameters) used to call the actor method.
- Params []byte `json:"params"`
+ // The must all be individually abi encodable.
+ Params []interface{} `json:"params"`
}
// PaymentVoucher is a voucher for a payment channel that can be transferred off-chain but guarantees a future payment. | 1 | package types
import (
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/multiformats/go-multibase"
"github.com/filecoin-project/go-filecoin/address"
)
func init() {
cbor.RegisterCborType(Predicate{})
cbor.RegisterCborType(PaymentVoucher{})
}
// Predicate is an optional message that is sent to another actor and must return true for the voucher to be valid.
type Predicate struct {
// To is the address of the actor to which this predicate is addressed.
To address.Address `json:"to"`
// Method is the actor method this predicate will call.
Method string `json:"method"`
// Params are the parameters (or a subset of the parameters) used to call the actor method.
Params []byte `json:"params"`
}
// PaymentVoucher is a voucher for a payment channel that can be transferred off-chain but guarantees a future payment.
type PaymentVoucher struct {
// Channel is the id of this voucher's payment channel.
Channel ChannelID `json:"channel"`
// Payer is the address of the account that created the channel.
Payer address.Address `json:"payer"`
// Target is the address of the account that will receive funds from the channel.
Target address.Address `json:"target"`
// Amount is the FIL this voucher authorizes the target to redeemed from the channel.
Amount AttoFIL `json:"amount"`
// ValidAt is the earliest block height at which this voucher is valid.
ValidAt BlockHeight `json:"valid_at"`
// Condition defines a optional message that will be called and must return true before this voucher can be redeemed.
Condition *Predicate `json:"condition"`
// Signature is the signature of all the data in this voucher.
Signature Signature `json:"signature"`
}
// DecodeVoucher creates a *PaymentVoucher from a base58, Cbor-encoded one
func DecodeVoucher(voucherRaw string) (*PaymentVoucher, error) {
_, cborVoucher, err := multibase.Decode(voucherRaw)
if err != nil {
return nil, err
}
var voucher PaymentVoucher
err = cbor.DecodeInto(cborVoucher, &voucher)
if err != nil {
return nil, err
}
return &voucher, nil
}
// Encode creates a base58, Cbor-encoded string representation
func (voucher *PaymentVoucher) Encode() (string, error) {
cborVoucher, err := cbor.DumpObject(voucher)
if err != nil {
return "", err
}
return multibase.Encode(multibase.Base58BTC, cborVoucher)
}
| 1 | 18,839 | nit: I think "They"? | filecoin-project-venus | go |
@@ -0,0 +1,7 @@
+namespace Nethermind.Core2
+{
+ public static class SszLimit
+ {
+ public const ulong ValidatorRegistryLimit = 1_099_511_627_776;
+ }
+} | 1 | 1 | 22,968 | is it a result of some multiplication or some other meaningful number? would be nice to display as a power of something or the mult | NethermindEth-nethermind | .cs |
|
@@ -5,11 +5,11 @@ module Features
forum
office_hours
shows
- source_code
- video_tutorials
)
+ LICENSEABLE_FEATURES = %w(repositories video_tutorials)
FULFILLABLE_FEATURES = %w(mentor)
- ALL_FEATURES = GENERIC_FEATURES + FULFILLABLE_FEATURES
+ ALL_FEATURES = GENERIC_FEATURES + LICENSEABLE_FEATURES +
+ FULFILLABLE_FEATURES
def initialize(user:)
@user = user | 1 | module Features
class Factory
GENERIC_FEATURES = %w(
exercises
forum
office_hours
shows
source_code
video_tutorials
)
FULFILLABLE_FEATURES = %w(mentor)
ALL_FEATURES = GENERIC_FEATURES + FULFILLABLE_FEATURES
def initialize(user:)
@user = user
end
def new(feature_string_or_symbol)
@feature_string = feature_string_or_symbol.to_s
feature_class.new(user: @user)
end
private
def feature_class
if generic_feature?
Features::Generic
else
"Features::#{@feature_string.classify}".constantize
end
end
def generic_feature?
@feature_string.in? GENERIC_FEATURES
end
end
end
| 1 | 11,740 | I think this class is a good example of where Payload could significantly improve things. I'm okay with this class as it stands. If you merge this in, I'd like to have a go at refactoring it after introducing Payload. | thoughtbot-upcase | rb |
@@ -2,6 +2,7 @@
from __future__ import unicode_literals
import unittest
import os
+import pytest
import colander
import mock | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import os
import colander
import mock
import six
from kinto.core import includeme
from kinto.core import DEFAULT_SETTINGS
from pyramid import httpexceptions
from pyramid import request as pyramid_request
from pyramid import testing
from kinto.core.utils import (
native_value, strip_whitespace, random_bytes_hex, read_env, hmac_digest,
current_service, encode_header, decode_header, follow_subrequest,
build_request, dict_subset
)
from kinto.core.testing import DummyRequest
def build_real_request(wsgi_environ):
"""Build a Pyramid request, as if it was instantiated by Pyramid.
"""
config = testing.setUp(settings=DEFAULT_SETTINGS)
includeme(config)
request = pyramid_request.Request(wsgi_environ)
request.registry = config.registry
return request
class NativeValueTest(unittest.TestCase):
def test_simple_string(self):
self.assertEqual(native_value('value'), 'value')
def test_integer(self):
self.assertEqual(native_value('7'), 7)
def test_zero_and_one_coerce_to_integers(self):
self.assertEqual(native_value('1'), 1)
self.assertEqual(native_value('0'), 0)
def test_float(self):
self.assertEqual(native_value('3.14'), 3.14)
def test_true_values(self):
true_strings = ['True', 'on', 'true', 'yes']
true_values = [native_value(s) for s in true_strings]
self.assertTrue(all(true_values))
def test_false_values(self):
false_strings = ['False', 'off', 'false', 'no']
false_values = [native_value(s) for s in false_strings]
self.assertFalse(any(false_values))
def test_non_string_values(self):
self.assertEqual(native_value(7), 7)
self.assertEqual(native_value(True), True)
class StripWhitespaceTest(unittest.TestCase):
def test_removes_all_kinds_of_spaces(self):
value = " \t teaser \n \r"
self.assertEqual(strip_whitespace(value), 'teaser')
def test_does_remove_middle_spaces(self):
self.assertEqual(strip_whitespace('a b c'), 'a b c')
def test_idempotent_for_null_values(self):
self.assertEqual(strip_whitespace(colander.null), colander.null)
class CryptographicRandomBytesTest(unittest.TestCase):
def test_return_hex_string(self):
value = random_bytes_hex(16)
try:
int(value, 16)
except ValueError:
self.fail("%s is not an hexadecimal value." % value)
def test_return_right_length_string(self):
for x in range(2, 4):
value = random_bytes_hex(x)
self.assertEqual(len(value), x * 2)
def test_return_text_string(self):
value = random_bytes_hex(16)
self.assertIsInstance(value, six.text_type)
class HmacDigestTest(unittest.TestCase):
def test_supports_secret_as_text(self):
value = hmac_digest("blah", "input data")
self.assertTrue(value.startswith("d4f5c51db246c7faeb42240545b47274b6"))
def test_supports_secret_as_bytes(self):
value = hmac_digest(b"blah", "input data")
self.assertTrue(value.startswith("d4f5c51db246c7faeb42240545b47274b6"))
class ReadEnvironmentTest(unittest.TestCase):
def test_return_passed_value_if_not_defined_in_env(self):
self.assertEqual(read_env('missing', 12), 12)
def test_return_env_value_if_defined_in_env(self):
os.environ.setdefault('KINTO_CONF', 'abc')
self.assertEqual(read_env('KINTO_CONF', 12), 'abc')
def test_return_env_name_as_uppercase(self):
os.environ.setdefault('KINTO_NAME', 'abc')
self.assertEqual(read_env('kinto.name', 12), 'abc')
def test_return_env_value_is_coerced_to_python(self):
os.environ.setdefault('KINTO_CONF_NAME', '3.14')
self.assertEqual(read_env('kinto-conf.name', 12), 3.14)
class CurrentServiceTest(unittest.TestCase):
def test_current_service_returns_the_service_for_existing_patterns(self):
request = DummyRequest()
request.matched_route.pattern = '/buckets'
request.registry.cornice_services = {'/buckets': mock.sentinel.service}
self.assertEqual(current_service(request), mock.sentinel.service)
def test_current_service_returns_none_for_unexisting_patterns(self):
request = DummyRequest()
request.matched_route.pattern = '/unexisting'
request.registry.cornice_services = {}
self.assertEqual(current_service(request), None)
class BuildRequestTest(unittest.TestCase):
def test_built_request_has_kinto_core_custom_methods(self):
original = build_real_request({'PATH_INFO': '/foo'})
request = build_request(original, {"path": "bar"})
self.assertTrue(hasattr(request, 'current_service'))
class EncodeHeaderTest(unittest.TestCase):
def test_returns_a_string_if_passed_a_string(self):
entry = str('Toto')
value = encode_header(entry)
self.assertEqual(entry, value)
self.assertEqual(type(value), str)
def test_returns_a_string_if_passed_bytes(self):
entry = 'Toto'.encode('utf-8')
value = encode_header(entry)
self.assertEqual(type(value), str)
def test_returns_a_string_if_passed_bytes_and_encoding(self):
entry = 'Rémy'.encode('latin-1')
value = encode_header(entry, 'latin-1')
self.assertEqual(type(value), str)
def test_returns_a_string_if_passed_unicode(self):
entry = six.text_type('Rémy')
value = encode_header(entry)
self.assertEqual(type(value), str)
def test_returns_a_string_if_passed_unicode_with_encoding(self):
entry = six.text_type('Rémy')
value = encode_header(entry, 'latin-1')
self.assertEqual(type(value), str)
class DecodeHeaderTest(unittest.TestCase):
def test_returns_an_unicode_string_if_passed_a_string(self):
entry = 'Toto'
value = decode_header(entry)
self.assertEqual(entry, value)
def test_returns_an_unicode__string_if_passed_bytes(self):
entry = 'Toto'.encode('utf-8')
value = decode_header(entry)
self.assertEqual(type(value), six.text_type)
def test_returns_an_unicode__string_if_passed_bytes_and_encoding(self):
entry = 'Rémy'.encode('latin-1')
value = decode_header(entry, 'latin-1')
self.assertEqual(type(value), six.text_type)
class FollowSubrequestTest(unittest.TestCase):
def test_parent_and_bound_data_are_preserved(self):
request = DummyRequest()
request.invoke_subrequest.side_effect = (
httpexceptions.HTTPTemporaryRedirect, None)
subrequest = DummyRequest()
subrequest.parent = mock.sentinel.parent
subrequest.bound_data = mock.sentinel.bound_data
_, redirected = follow_subrequest(request, subrequest)
self.assertEqual(subrequest.parent, redirected.parent)
self.assertEqual(subrequest.bound_data, redirected.bound_data)
class DictSubsetTest(unittest.TestCase):
def test_extract_by_keys(self):
obtained = dict_subset(dict(a=1, b=2), ["b"])
expected = dict(b=2)
self.assertEqual(obtained, expected)
def test_is_noop_if_no_keys(self):
obtained = dict_subset(dict(a=1, b=2), [])
expected = dict()
self.assertEqual(obtained, expected)
def test_ignores_unknown_keys(self):
obtained = dict_subset(dict(a=1, b=2), ["a", "c"])
expected = dict(a=1)
self.assertEqual(obtained, expected)
def test_ignores_duplicated_keys(self):
obtained = dict_subset(dict(a=1, b=2), ["a", "a"])
expected = dict(a=1)
self.assertEqual(obtained, expected)
def test_can_filter_subobjects(self):
obtained = dict_subset(dict(a=1, b=dict(c=2, d=3)), ["a", "b.c"])
expected = dict(a=1, b=dict(c=2))
self.assertEqual(obtained, expected)
def test_can_filter_subobjects_keys(self):
input = dict(a=1, b=dict(c=2, d=3, e=4))
obtained = dict_subset(input, ["a", "b.d", "b.e"])
expected = dict(a=1, b=dict(d=3, e=4))
self.assertEqual(obtained, expected)
def test_can_filter_subobjects_recursively(self):
input = dict(a=1, b=dict(c=2, d=dict(e=4, f=5)))
obtained = dict_subset(input, ["a", "b.d.e"])
expected = dict(a=1, b=dict(d=dict(e=4)))
self.assertEqual(obtained, expected)
def test_ignores_if_subobject_is_not_dict(self):
input = dict(a=1, b=dict(c=2, d=3))
obtained = dict_subset(input, ["a", "b.c.d", "b.d"])
expected = dict(a=1, b=dict(c=2, d=3))
self.assertEqual(obtained, expected)
| 1 | 9,950 | Just a small tip: I think you don't need to include pytest here. You can run tests using: - `pytest tests/path/to/my/test` under the virtualenv to run a single test. - `pytest tests/` under the virtualenv to run all the tests. - `make tests` to run all the tests for all python versions, like travis do. Hope this helps! :) | Kinto-kinto | py |
@@ -31,6 +31,15 @@ from .options import Store
from .util import unique_iterator, group_sanitizer, label_sanitizer
+def sanitizer(name, replacements={':':'_', '/':'_', '\\':'_'}):
+ """
+ String sanitizer to avoid problematic characters in filenames.
+ """
+ for k,v in replacements.items():
+ name = name.replace(k,v)
+ return name
+
+
class Reference(param.Parameterized):
"""
A Reference allows access to an object to be deferred until it is | 1 | """
Module defining input/output interfaces to HoloViews.
There are two components for input/output:
Exporters: Process (composite) HoloViews objects one at a time. For
instance, an exporter may render a HoloViews object as a
svg or perhaps pickle it.
Archives: A collection of HoloViews objects that are first collected
then processed together. For instance, collecting HoloViews
objects for a report then generating a PDF or collecting
HoloViews objects to dump to HDF5.
"""
from __future__ import absolute_import
import re, os, time, string, zipfile, tarfile, shutil, itertools, pickle
from collections import defaultdict
from io import BytesIO
from hashlib import sha256
import param
from param.parameterized import bothmethod
from .dimension import LabelledData
from .element import Collator, Element
from .layout import Layout
from .ndmapping import OrderedDict, NdMapping, UniformNdMapping
from .options import Store
from .util import unique_iterator, group_sanitizer, label_sanitizer
class Reference(param.Parameterized):
"""
A Reference allows access to an object to be deferred until it is
needed in the appropriate context. References are used by
Collector to capture the state of an object at collection time.
One particularly important property of references is that they
should be pickleable. This means that you can pickle Collectors so
that you can unpickle them in different environments and still
collect from the required object.
A Reference only needs to have a resolved_type property and a
resolve method. The constructor will take some specification of
where to find the target object (may be the object itself).
"""
@property
def resolved_type(self):
"""
Returns the type of the object resolved by this references. If
multiple types are possible, the return is a tuple of types.
"""
raise NotImplementedError
def resolve(self, container=None):
"""
Return the referenced object. Optionally, a container may be
passed in from which the object is to be resolved.
"""
raise NotImplementedError
class Exporter(param.ParameterizedFunction):
"""
An Exporter is a parameterized function that accepts a HoloViews
object and converts it to a new some new format. This mechanism is
designed to be very general so here are a few examples:
Pickling: Native Python, supported by HoloViews.
Rendering: Any plotting backend may be used (default uses matplotlib)
Storage: Saving to a database (e.g SQL), HDF5 etc.
"""
# Mime-types that need encoding as utf-8 upon export
utf8_mime_types = ['image/svg+xml', 'text/html', 'text/json']
key_fn = param.Callable(doc="""
Function that generates the metadata key from the HoloViews
object being saved. The metadata key is a single
high-dimensional key of values associated with dimension labels.
The returned dictionary must have string keys and simple
literals that may be conviently used for dictionary-style
indexing. Returns an empty dictionary by default.""")
info_fn = param.Callable(lambda x: {'repr':repr(x)}, doc="""
Function that generates additional metadata information from the
HoloViews object being saved.
Unlike metadata keys, the information returned may be unsuitable
for use as a key index and may include entries such as the
object's repr. Regardless, the info metadata should still only
contain items that will be quick to load and inspect. """)
@classmethod
def encode(cls, entry):
"""
Classmethod that applies conditional encoding based on
mime-type. Given an entry as returned by __call__ return the
data in the appropriate encoding.
"""
(data, info) = entry
if info['mime_type'] in cls.utf8_mime_types:
return data.encode('utf-8')
else:
return data
@bothmethod
def _filename(self_or_cls, filename):
"Add the file extension if not already present"
if not filename.endswith(self_or_cls.file_ext):
return '%s.%s' % (filename, self_or_cls.file_ext)
else:
return filename
@bothmethod
def _merge_metadata(self_or_cls, obj, fn, *dicts):
"""
Returns a merged metadata info dictionary from the supplied
function and additional dictionaries
"""
merged = dict([(k,v) for d in dicts for (k,v) in d.items()])
return dict(merged, **fn(obj)) if fn else merged
def __call__(self, obj, fmt=None):
"""
Given a HoloViews object return the raw exported data and
corresponding metadata as the tuple (data, metadata). The
metadata should include:
'file-ext' : The file extension if applicable (else empty string)
'mime_type': The mime-type of the data.
The fmt argument may be used with exporters that support multiple
output formats. If not supplied, the exporter is to pick an
appropriate format automatically.
"""
raise NotImplementedError("Exporter not implemented.")
@bothmethod
def save(self_or_cls, obj, basename, fmt=None, key={}, info={}, **kwargs):
"""
Similar to the call method except saves exporter data to disk
into a file with specified basename. For exporters that
support multiple formats, the fmt argument may also be
supplied (which typically corresponds to the file-extension).
The supplied metadata key and info dictionaries will be used
to update the output of the relevant key and info functions
which is then saved (if supported).
"""
raise NotImplementedError("Exporter save method not implemented.")
class Importer(param.ParameterizedFunction):
"""
An Importer is a parameterized function that accepts some data in
some format and returns a HoloViews object. This mechanism is
designed to be very general so here are a few examples:
Unpickling: Native Python, supported by HoloViews.
Servers: Loading data over a network connection.
Storage: Loading from a database (e.g SQL), HDF5 etc.
"""
def __call__(self, data):
"""
Given raw data in the appropriate format return the
corresponding HoloViews object. Acts as the inverse of
Exporter when supplied the data portion of an Exporter's
output.
"""
raise NotImplementedError("Importer not implemented.")
@bothmethod
def load(self_or_cls, src, entries=None):
"""
Given some source (e.g. a filename, a network connection etc),
return the loaded HoloViews object.
"""
raise NotImplementedError("Importer load method not implemented.")
@bothmethod
def loader(self_or_cls, kwargs):
return self_or_cls.load(**kwargs)
@bothmethod
def info(self_or_cls, src):
"""
Returns the 'info' portion of the metadata (if available).
"""
raise NotImplementedError("Importer info method not implemented.")
@bothmethod
def key(self_or_cls, src):
"""
Returns the metadata key (if available).
"""
raise NotImplementedError("Importer keys method not implemented.")
class Serializer(Exporter):
"A generic exporter that supports any arbitrary serializer"
serializer=param.Callable(Store.dumps, doc="""
The serializer function, set to Store.dumps by default. The
serializer should take an object and output a serialization as
a string or byte stream.
Any suitable serializer may be used. For instance, pickle.dumps
may be used although this will not save customized options.""")
mime_type=param.String('application/python-pickle', allow_None=True, doc="""
The mime-type associated with the serializer (if applicable).""")
file_ext = param.String('pkl', doc="""
The file extension associated with the corresponding file
format (if applicable).""")
def __call__(self, obj, **kwargs):
data = self.serializer(obj)
return data, {'file-ext': self.file_ext, 'mime_type':self.mime_type}
@bothmethod
def save(self_or_cls, obj, filename, info={}, key={}, **kwargs):
data, base_info = self_or_cls(obj, **kwargs)
key = self_or_cls._merge_metadata(obj, self_or_cls.key_fn, key)
info = self_or_cls._merge_metadata(obj, self_or_cls.info_fn, info, base_info)
metadata, _ = self_or_cls({'info':info, 'key':key}, **kwargs)
filename = self_or_cls._filename(filename)
with open(filename, 'ab') as f:
f.write(metadata)
f.write(data)
class Deserializer(Importer):
"A generic importer that supports any arbitrary de-serializer."
deserializer=param.Callable(Store.load, doc="""
The deserializer function, set to Store.load by default. The
deserializer should take a file-like object that can be read
from until the first object has been deserialized. If the file
has not been exhausted, the deserializer should be able to
continue parsing and loading objects.
Any suitable deserializer may be used. For instance,
pickle.load may be used although this will not load customized
options.""")
def __call__(self, data):
return self.deserializer(BytesIO(data))
@bothmethod
def load(self_or_cls, filename):
with open(filename, 'rb') as f:
data = self_or_cls.deserializer(f)
try:
data = self_or_cls.deserializer(f)
except: pass
return data
@bothmethod
def key(self_or_cls, filename):
with open(filename, "rb") as f:
metadata = self_or_cls.deserializer(f)
metadata = metadata if isinstance(metadata, dict) else {}
return metadata.get('key', {})
@bothmethod
def info(self_or_cls, filename):
with open(filename, "rb") as f:
metadata = self_or_cls.deserializer(f)
metadata = metadata if isinstance(metadata, dict) else {}
return metadata.get('info', {})
class Pickler(Exporter):
"""
The recommended pickler for serializing HoloViews object to a .hvz
file (a simple zip archive of pickle files). In addition to the
functionality offered by Store.dump and Store.load, this file
format offers three additional features:
1. Optional (zip) compression.
2. Ability to save and load components of a Layout independently.
3. Support for metadata per saved component.
The output file with the .hvz file extension is simply a zip
archive containing pickled HoloViews objects.
"""
protocol = param.Integer(default=2, doc="""
The pickling protocol where 0 is ASCII, 1 supports old Python
versions and 2 is efficient for new style classes.""")
compress = param.Boolean(default=True, doc="""
Whether compression is enabled or not""")
mime_type = 'application/zip'
file_ext = 'hvz'
def __call__(self, obj, key={}, info={}, **kwargs):
buff = BytesIO()
self.save(obj, buff, key=key, info=info, **kwargs)
buff.seek(0)
return buff.read(), {'file-ext': 'hvz', 'mime_type':self.mime_type}
@bothmethod
def save(self_or_cls, obj, filename, key={}, info={}, **kwargs):
base_info = {'file-ext': 'hvz', 'mime_type':self_or_cls.mime_type}
key = self_or_cls._merge_metadata(obj, self_or_cls.key_fn, key)
info = self_or_cls._merge_metadata(obj, self_or_cls.info_fn, info, base_info)
compression = zipfile.ZIP_STORED if self_or_cls.compress else zipfile.ZIP_DEFLATED
filename = self_or_cls._filename(filename) if isinstance(filename, str) else filename
with zipfile.ZipFile(filename, 'w', compression=compression) as f:
if isinstance(obj, Layout):
entries = ['.'.join(k) for k in obj.data.keys()]
components = list(obj.data.values())
entries = entries if len(entries) > 1 else [entries[0]+'(L)']
else:
entries = ['%s.%s' % (group_sanitizer(obj.group, False),
label_sanitizer(obj.label, False))]
components = [obj]
for component, entry in zip(components, entries):
f.writestr(entry,
Store.dumps(component, protocol=self_or_cls.protocol))
f.writestr('metadata',
pickle.dumps({'info':info, 'key':key}))
class Unpickler(Importer):
"""
The inverse of Pickler used to load the .hvz file format which is
simply a zip archive of pickle objects.
Unlike a regular pickle file, info and key metadata as well as
individual components of a Layout may be loaded without needing to
load the entire file into memory.
The components that may be individually loaded may be found using
the entries method.
"""
def __call__(self, data, entries=None):
buff = BytesIO(data)
return self.load(buff, entries=entries)
@bothmethod
def load(self_or_cls, filename, entries=None):
components, single_layout = [], False
entries = entries if entries else self_or_cls.entries(filename)
with zipfile.ZipFile(filename, 'r') as f:
for entry in entries:
if entry not in f.namelist():
raise Exception("Entry %s not available" % entry)
components.append(Store.loads(f.read(entry)))
single_layout = entry.endswith('(L)')
if len(components) == 1 and not single_layout:
return components[0]
else:
return Layout.from_values(components)
@bothmethod
def _load_metadata(self_or_cls, filename, name):
with zipfile.ZipFile(filename, 'r') as f:
if 'metadata' not in f.namelist():
raise Exception("No metadata available")
metadata = pickle.loads(f.read('metadata'))
if name not in metadata:
raise KeyError("Entry %s is missing from the metadata" % name)
return metadata[name]
@bothmethod
def key(self_or_cls, filename):
return self_or_cls._load_metadata(filename, 'key')
@bothmethod
def info(self_or_cls, filename):
return self_or_cls._load_metadata(filename, 'info')
@bothmethod
def entries(self_or_cls, filename):
with zipfile.ZipFile(filename, 'r') as f:
return [el for el in f.namelist() if el != 'metadata']
@bothmethod
def collect(self_or_cls, files, drop=[], metadata=True):
"""
Given a list or NdMapping type containing file paths return a
Layout of Collators, which can be called to load a given set
of files using the current Importer.
If supplied as a list each file is expected to disambiguate
itself with contained metadata. If an NdMapping type is
supplied additional key dimensions may be supplied as long as
they do not clash with the file metadata. Any key dimension
may be dropped by name by supplying a drop argument.
"""
aslist = not isinstance(files, (NdMapping, Element))
if isinstance(files, Element):
files = Collator(files)
file_kdims = files.kdims
else:
file_kdims = files.kdims
drop_extra = files.drop if isinstance(files, Collator) else []
mdata_dims = []
if metadata:
fnames = [fname[0] if isinstance(fname, tuple) else fname
for fname in files.values()]
mdata_dims = {kdim for fname in fnames
for kdim in self_or_cls.key(fname).keys()}
file_dims = set(files.dimensions('key', label=True))
added_dims = set(mdata_dims) - file_dims
overlap_dims = file_dims & set(mdata_dims)
kwargs = dict(kdims=file_kdims + sorted(added_dims),
vdims=['filename', 'entries'],
value_transform=self_or_cls.loader,
drop=drop_extra + drop)
layout_data = defaultdict(lambda: Collator(None, **kwargs))
for key, fname in files.data.items():
fname = fname[0] if isinstance(fname, tuple) else fname
mdata = self_or_cls.key(fname) if metadata else {}
for odim in overlap_dims:
kval = key[files.get_dimension_index(odim)]
if kval != mdata[odim]:
raise KeyError("Metadata supplies inconsistent "
"value for dimension %s" % odim)
mkey = tuple(mdata.get(d, None) for d in added_dims)
key = mkey if aslist else key + mkey
if isinstance(fname, tuple) and len(fname) == 1:
(fname,) = fname
for entry in self_or_cls.entries(fname):
layout_data[entry][key] = (fname, [entry])
return Layout(layout_data.items())
class Archive(param.Parameterized):
"""
An Archive is a means to collect and store a collection of
HoloViews objects in any number of different ways. Examples of
possible archives:
* Generating tar or zip files (compressed or uncompressed).
* Collating a report or document (e.g. PDF, HTML, LaTex).
* Storing a collection of HoloViews objects to a database or HDF5.
"""
exporters= param.List(default=[], doc="""
The exporter functions used to convert HoloViews objects into the
appropriate format(s).""" )
def add(self, obj, *args, **kwargs):
"""
Add a HoloViews object to the archive.
"""
raise NotImplementedError
def export(self,*args, **kwargs):
"""
Finalize and close the archive.
"""
raise NotImplementedError
def simple_name_generator(obj):
"""
Simple name_generator designed for HoloViews objects.
Objects are labeled with {group}-{label} for each nested
object, based on a depth-first search. Adjacent objects with
identical representations yield only a single copy of the
representation, to avoid long names for the common case of
a container whose element(s) share the same group and label.
"""
if isinstance(obj, LabelledData):
labels = obj.traverse(lambda x:
(x.group + ('-' +x.label if x.label else '')))
labels=[l[0] for l in itertools.groupby(labels)]
obj_str = ','.join(labels)
else:
obj_str = repr(obj)
return obj_str
class FileArchive(Archive):
"""
A file archive stores files on disk, either unpacked in a
directory or in an archive format (e.g. a zip file).
"""
exporters= param.List(default=[Pickler], doc="""
The exporter functions used to convert HoloViews objects into
the appropriate format(s).""")
dimension_formatter = param.String("{name}_{range}", doc="""
A string formatter for the output file based on the
supplied HoloViews objects dimension names and values.
Valid fields are the {name}, {range} and {unit} of the
dimensions.""")
object_formatter = param.Callable(default=simple_name_generator, doc="""
Callable that given an object returns a string suitable for
inclusion in file and directory names. This is what generates
the value used in the {obj} field of the filename
formatter.""")
filename_formatter = param.String('{dimensions},{obj}', doc="""
A string formatter for output filename based on the HoloViews
object that is being rendered to disk.
The available fields are the {type}, {group}, {label}, {obj}
of the holoviews object added to the archive as well as
{timestamp}, {obj} and {SHA}. The {timestamp} is the export
timestamp using timestamp_format, {obj} is the object
representation as returned by object_formatter and {SHA} is
the SHA of the {obj} value used to compress it into a shorter
string.""")
timestamp_format = param.String("%Y_%m_%d-%H_%M_%S", doc="""
The timestamp format that will be substituted for the
{timestamp} field in the export name.""")
root = param.String('.', doc="""
The root directory in which the output directory is
located. May be an absolute or relative path.""")
archive_format = param.ObjectSelector('zip', objects=['zip', 'tar'], doc="""
The archive format to use if there are multiple files and pack
is set to True. Supported formats include 'zip' and 'tar'.""")
pack = param.Boolean(default=False, doc="""
Whether or not to pack to contents into the specified archive
format. If pack is False, the contents will be output to a
directory.
Note that if there is only a single file in the archive, no
packing will occur and no directory is created. Instead, the
file is treated as a single-file archive.""")
export_name = param.String(default='{timestamp}', doc="""
The name assigned to the overall export. If an archive file is
used, this is the correspond filename (e.g of the exporter zip
file). Alternatively, if unpack=False, this is the name of the
output directory. Lastly, for archives of a single file, this
is the basename of the output file.
The {timestamp} field is available to include the timestamp at
the time of export in the chosen timestamp format.""")
unique_name = param.Boolean(default=False, doc="""
Whether the export name should be made unique with a numeric
suffix. If set to False, any existing export of the same name
will be removed and replaced.""")
max_filename = param.Integer(default=100, bounds=(0,None), doc="""
Maximum length to enforce on generated filenames. 100 is the
practical maximum for zip and tar file generation, but you may
wish to use a lower value to avoid long filenames.""")
flush_archive = param.Boolean(default=True, doc="""
Flushed the contents of the archive after export.
""")
ffields = {'type', 'group', 'label', 'obj', 'SHA', 'timestamp', 'dimensions'}
efields = {'timestamp'}
@classmethod
def parse_fields(cls, formatter):
"Returns the format fields otherwise raise exception"
if formatter is None: return []
try:
parse = list(string.Formatter().parse(formatter))
return set(f for f in list(zip(*parse))[1] if f is not None)
except:
raise SyntaxError("Could not parse formatter %r" % formatter)
def __init__(self, **params):
super(FileArchive, self).__init__(**params)
# Items with key: (basename,ext) and value: (data, info)
self._files = OrderedDict()
self._validate_formatters()
def _dim_formatter(self, obj):
if not obj: return ''
key_dims = obj.traverse(lambda x: x.kdims, [UniformNdMapping])
constant_dims = obj.traverse(lambda x: x.cdims)
dims = []
map(dims.extend, key_dims + constant_dims)
dims = unique_iterator(dims)
dim_strings = []
for dim in dims:
lower, upper = obj.range(dim.name)
lower, upper = (dim.pprint_value(lower),
dim.pprint_value(upper))
if lower == upper:
range = dim.pprint_value(lower)
else:
range = "%s-%s" % (lower, upper)
formatters = {'name': dim.name, 'range': range,
'unit': dim.unit}
dim_strings.append(self.dimension_formatter.format(**formatters))
return '_'.join(dim_strings)
def _validate_formatters(self):
if not self.parse_fields(self.filename_formatter).issubset(self.ffields):
raise Exception("Valid filename fields are: %s" % ','.join(sorted(self.ffields)))
elif not self.parse_fields(self.export_name).issubset(self.efields):
raise Exception("Valid export fields are: %s" % ','.join(sorted(self.efields)))
try: time.strftime(self.timestamp_format, tuple(time.localtime()))
except: raise Exception("Timestamp format invalid")
def add(self, obj=None, filename=None, data=None, info={}, **kwargs):
"""
If a filename is supplied, it will be used. Otherwise, a
filename will be generated from the supplied object. Note that
if the explicit filename uses the {timestamp} field, it will
be formatted upon export.
The data to be archived is either supplied explicitly as
'data' or automatically rendered from the object.
"""
if [filename, obj] == [None, None]:
raise Exception("Either filename or a HoloViews object is "
"needed to create an entry in the archive.")
elif obj is None and not self.parse_fields(filename).issubset({'timestamp'}):
raise Exception("Only the {timestamp} formatter may be used unless an object is supplied.")
elif [obj, data] == [None, None]:
raise Exception("Either an object or explicit data must be "
"supplied to create an entry in the archive.")
elif data and 'mime_type' not in info:
raise Exception("The mime-type must be supplied in the info dictionary "
"when supplying data directly")
self._validate_formatters()
entries = []
if data is None:
for exporter in self.exporters:
rendered = exporter(obj)
if rendered is None: continue
(data, new_info) = rendered
info = dict(info, **new_info)
entries.append((data, info))
else:
entries.append((data, info))
for (data, info) in entries:
self._add_content(obj, data, info, filename=filename)
def _add_content(self, obj, data, info, filename=None):
(unique_key, ext) = self._compute_filename(obj, info, filename=filename)
self._files[(unique_key, ext)] = (data, info)
def _compute_filename(self, obj, info, filename=None):
if filename is None:
hashfn = sha256()
obj_str = 'None' if obj is None else self.object_formatter(obj)
dimensions = self._dim_formatter(obj)
dimensions = dimensions if dimensions else ''
hashfn.update(obj_str.encode('utf-8'))
format_values = {'timestamp': '{timestamp}',
'dimensions': dimensions,
'group': getattr(obj, 'group', 'no-group'),
'label': getattr(obj, 'label', 'no-label'),
'type': obj.__class__.__name__,
'obj': obj_str,
'SHA': hashfn.hexdigest()}
filename = self._format(self.filename_formatter,
dict(info, **format_values))
filename = self._normalize_name(filename)
ext = info.get('file-ext', '')
(unique_key, ext) = self._unique_name(filename, ext,
self._files.keys(), force=True)
return (unique_key, ext)
def _zip_archive(self, export_name, files, root):
archname = '.'.join(self._unique_name(export_name, 'zip', root))
with zipfile.ZipFile(os.path.join(root, archname), 'w') as zipf:
for (basename, ext), entry in files:
filename = self._truncate_name(basename, ext)
zipf.writestr(('%s/%s' % (export_name, filename)),Exporter.encode(entry))
def _tar_archive(self, export_name, files, root):
archname = '.'.join(self._unique_name(export_name, 'tar', root))
with tarfile.TarFile(os.path.join(root, archname), 'w') as tarf:
for (basename, ext), entry in files:
filename = self._truncate_name(basename, ext)
tarinfo = tarfile.TarInfo('%s/%s' % (export_name, filename))
filedata = Exporter.encode(entry)
tarinfo.size = len(filedata)
tarf.addfile(tarinfo, BytesIO(filedata))
def _single_file_archive(self, export_name, files, root):
((basename, ext), entry) = files[0]
full_fname = '%s_%s' % (export_name, basename)
(unique_name, ext) = self._unique_name(full_fname, ext, root)
filename = self._truncate_name(self._normalize_name(unique_name), ext=ext)
fpath = os.path.join(root, filename)
with open(fpath, 'wb') as f:
f.write(Exporter.encode(entry))
def _directory_archive(self, export_name, files, root):
output_dir = os.path.join(root, self._unique_name(export_name,'', root)[0])
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
for (basename, ext), entry in files:
filename = self._truncate_name(basename, ext)
fpath = os.path.join(output_dir, filename)
with open(fpath, 'wb') as f:
f.write(Exporter.encode(entry))
def _unique_name(self, basename, ext, existing, force=False):
"""
Find a unique basename for a new file/key where existing is
either a list of (basename, ext) pairs or an absolute path to
a directory.
By default, uniqueness is enforced dependning on the state of
the unique_name parameter (for export names). If force is
True, this parameter is ignored and uniqueness is guaranteed.
"""
skip = False if force else (not self.unique_name)
if skip: return (basename, ext)
ext = '' if ext is None else ext
if isinstance(existing, str):
split = [os.path.splitext(el)
for el in os.listdir(os.path.abspath(existing))]
existing = [(n, ex if not ex else ex[1:]) for (n, ex) in split]
new_name, counter = basename, 1
while (new_name, ext) in existing:
new_name = basename+'-'+str(counter)
counter += 1
return (new_name, ext)
def _truncate_name(self, basename, ext='', tail=10, join='...', maxlen=None):
maxlen = self.max_filename if maxlen is None else maxlen
max_len = maxlen-len(ext)
if len(basename) > max_len:
start = basename[:max_len-(tail + len(join))]
end = basename[-tail:]
basename = start + join + end
filename = '%s.%s' % (basename, ext) if ext else basename
return filename
def _normalize_name(self, basename):
basename=re.sub('-+','-',basename)
basename=re.sub('^[-,_]','',basename)
return basename.replace(' ', '_')
def export(self, timestamp=None, info={}):
    """
    Export the archive, directory or file.

    The current entries are written out as a directory of files, a
    single file, or a zip/tar archive depending on the number of
    entries, the pack setting and the archive_format parameter.
    ``timestamp`` may override the current local time used to expand
    the timestamp field of export_name; ``info`` supplies extra
    formatting fields (it is copied, not mutated, so the mutable
    default is safe here).
    """
    tval = tuple(time.localtime()) if timestamp is None else timestamp
    tstamp = time.strftime(self.timestamp_format, tval)

    # Expand the export name and every per-entry basename with the
    # formatting info (timestamp included).
    info = dict(info, timestamp=tstamp)
    export_name = self._format(self.export_name, info)
    files = [((self._format(base, info), ext), val)
             for ((base, ext), val) in self._files.items()]
    root = os.path.abspath(self.root)
    # Make directory and populate if multiple files and not packed
    if len(self) > 1 and not self.pack:
        self._directory_archive(export_name, files, root)
    elif len(files) == 1:
        self._single_file_archive(export_name, files, root)
    elif self.archive_format == 'zip':
        self._zip_archive(export_name, files, root)
    elif self.archive_format == 'tar':
        self._tar_archive(export_name, files, root)
    # Optionally clear the pending entries once they have been exported.
    if self.flush_archive:
        self._files = OrderedDict()
def _format(self, formatter, info):
    # Apply the format string, restricting ``info`` to the field names
    # that the formatter actually references (as reported by
    # self.parse_fields).
    filtered = {k:v for k,v in info.items()
                if k in self.parse_fields(formatter)}
    return formatter.format(**filtered)
def __len__(self):
    "The number of files currently specified in the archive"
    # Entries accumulate in self._files until export() flushes them.
    return len(self._files)
def __repr__(self):
    # Delegate to the parameterized pprint representation.
    return self.pprint()
def contents(self, maxlen=70):
    "Print the current (unexported) contents of the archive"
    lines = []
    # Nothing to report for an empty archive.
    if len(self._files) == 0:
        print("Empty %s" % self.__class__.__name__)
        return

    # Keys of self._files are (basename, ext) tuples; each rendered
    # name is truncated to at most `maxlen` characters for display.
    fnames = [self._truncate_name(maxlen=maxlen, *k) for k in self._files]
    max_len = max([len(f) for f in fnames])
    for name,v in zip(fnames, self._files.values()):
        # v[1] appears to be a per-entry info dict that may carry a
        # 'mime_type' key -- NOTE(review): confirm against the writer.
        mime_type = v[1].get('mime_type', 'no mime type')
        # Left-justify names so the mime types line up in one column.
        lines.append('%s : %s' % (name.ljust(max_len), mime_type))
    print('\n'.join(lines))
def listing(self):
    "Return a list of filename entries currently in the archive"
    entries = []
    for (name, ext) in self._files.keys():
        entries.append('%s.%s' % (name, ext) if ext else name)
    return entries
| 1 | 16,187 | Might be worth sorting the replacement items so this is deterministic. | holoviz-holoviews | py |
@@ -29,6 +29,11 @@ type ChaosDashboardConfig struct {
EnableLeaderElection bool `envconfig:"ENABLE_LEADER_ELECTION"`
Database *DatabaseConfig
PersistTTL *PersistTTLConfig
+ // ClusterScoped means control Chaos Object in cluster level(all namespace),
+ ClusterScoped bool `envconfig:"CLUSTER_SCOPED" default:"true"`
+ // TargetNamespace is the target namespace to injecting chaos.
+ // It only works with ClusterScoped is false;
+ TargetNamespace string `envconfig:"TARGET_NAMESPACE" default:""`
}
// PersistTTLConfig defines the configuration of ttl | 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"time"
"github.com/kelseyhightower/envconfig"
"github.com/chaos-mesh/chaos-mesh/pkg/ttlcontroller"
)
// ChaosDashboardConfig defines the configuration for Chaos Dashboard
type ChaosDashboardConfig struct {
	// Bind address and port of the dashboard server
	// (LISTEN_HOST / LISTEN_PORT environment variables).
	ListenHost string `envconfig:"LISTEN_HOST" default:"0.0.0.0"`
	ListenPort int    `envconfig:"LISTEN_PORT" default:"2333"`
	// MetricAddress is read from METRIC_ADDRESS; no default is set.
	MetricAddress string `envconfig:"METRIC_ADDRESS"`
	// EnableLeaderElection is read from ENABLE_LEADER_ELECTION.
	EnableLeaderElection bool `envconfig:"ENABLE_LEADER_ELECTION"`
	// Nested sections, populated from the environment by envconfig.
	Database   *DatabaseConfig
	PersistTTL *PersistTTLConfig
}
// PersistTTLConfig defines the configuration of ttl.
//
// All fields are Go duration strings (see time.ParseDuration); they are
// converted into a ttlcontroller.TTLconfig by ParsePersistTTLConfig.
type PersistTTLConfig struct {
	// Resync period of the cleanup (maps to DatabaseTTLResyncPeriod).
	SyncPeriod string `envconfig:"CLEAN_SYNC_PERIOD" default:"12h"`
	// Retention of events (maps to EventTTL).
	Event string `envconfig:"TTL_EVENT" default:"168h"` // one week
	// Retention of archived experiments (maps to ArchiveExperimentTTL).
	Experiment string `envconfig:"TTL_EXPERIMENT" default:"336h"` // two weeks
}
// DatabaseConfig defines the configuration for databases
type DatabaseConfig struct {
	// Archive Chaos Experiments to DB
	Archive bool
	// Driver name and datasource string; default to a local sqlite3 file.
	Driver     string `envconfig:"DATABASE_DRIVER" default:"sqlite3"`
	Datasource string `envconfig:"DATABASE_DATASOURCE" default:"core.sqlite"`
	// Secret is read from DATABASE_SECRET; no default is set.
	Secret string `envconfig:"DATABASE_SECRET"`
}
// EnvironChaosDashboard returns the settings from the environment.
func EnvironChaosDashboard() (*ChaosDashboardConfig, error) {
	var cfg ChaosDashboardConfig
	if err := envconfig.Process("", &cfg); err != nil {
		return &cfg, err
	}
	return &cfg, nil
}
// ParsePersistTTLConfig parses a PersistTTLConfig, whose fields are Go
// duration strings (e.g. "12h"), into a ttlcontroller.TTLconfig holding
// concrete time.Duration values. It returns an error as soon as one of
// the duration strings fails to parse.
func ParsePersistTTLConfig(config *PersistTTLConfig) (*ttlcontroller.TTLconfig, error) {
	// Locals renamed to lowerCamelCase per Go naming convention
	// (capitalized identifiers read as exported names).
	syncPeriod, err := time.ParseDuration(config.SyncPeriod)
	if err != nil {
		return nil, err
	}

	event, err := time.ParseDuration(config.Event)
	if err != nil {
		return nil, err
	}

	experiment, err := time.ParseDuration(config.Experiment)
	if err != nil {
		return nil, err
	}

	return &ttlcontroller.TTLconfig{
		DatabaseTTLResyncPeriod: syncPeriod,
		EventTTL:                event,
		ArchiveExperimentTTL:    experiment,
	}, nil
}
| 1 | 17,906 | Should we set the default `TargetNamespace ` to `chaos-testing`? I noticed that the installation script defaults to chaos-testing. | chaos-mesh-chaos-mesh | go |
@@ -31,7 +31,6 @@ namespace MvvmCross.ViewModels
{
try
{
- await Task.Yield();
await task;
}
catch(Exception e) | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MS-PL license.
// See the LICENSE file in the project root for more information.
using System;
using System.ComponentModel;
using System.Threading.Tasks;
namespace MvvmCross.ViewModels
{
/// <summary>
/// Watches a task and raises property-changed notifications when the task completes.
/// </summary>
public sealed class MvxNotifyTask : INotifyPropertyChanged
{
    // Callback invoked when the watched task faults; may be null.
    private Action<Exception> _onException;

    /// <summary>
    /// Initializes a task notifier watching the specified task.
    /// </summary>
    /// <param name="task">The task to watch.</param>
    /// <param name="onException">Callback to be run when an error happens</param>
    private MvxNotifyTask(Task task, Action<Exception> onException)
    {
        Task = task;
        _onException = onException;
        // Start observing immediately; TaskCompleted completes
        // (successfully) once the watched task has settled and the
        // property notifications have been raised.
        TaskCompleted = MonitorTaskAsync(task);
    }

    private async Task MonitorTaskAsync(Task task)
    {
        try
        {
            // Yield first so the remainder always runs asynchronously,
            // i.e. the constructor returns before any notification fires.
            await Task.Yield();
            await task;
        }
        catch(Exception e)
        {
            // Observe the fault here and surface it via the callback
            // instead of letting it go unobserved.
            _onException?.Invoke(e);
        }
        finally
        {
            NotifyProperties(task);
        }
    }

    private void NotifyProperties(Task task)
    {
        // Snapshot the delegate to avoid racing with unsubscription.
        var propertyChanged = PropertyChanged;
        if(propertyChanged == null)
            return;

        // Raise only the properties relevant to how the task finished;
        // the EventArgs instances come from a shared cache.
        if(task.IsCanceled)
        {
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("Status"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("IsCanceled"));
        }
        else if(task.IsFaulted)
        {
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("Exception"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("InnerException"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("ErrorMessage"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("Status"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("IsFaulted"));
        }
        else
        {
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("Status"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("IsSuccessfullyCompleted"));
        }

        // These two change for every outcome.
        propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("IsCompleted"));
        propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("IsNotCompleted"));
    }

    /// <summary>
    /// Gets the task being watched. This property never changes and is never <c>null</c>.
    /// </summary>
    public Task Task { get; private set; }

    /// <summary>
    /// Gets a task that completes successfully when <see cref="Task"/> completes (successfully, faulted, or canceled). This property never changes and is never <c>null</c>.
    /// </summary>
    public Task TaskCompleted { get; private set; }

    /// <summary>
    /// Gets the current task status. This property raises a notification when the task completes.
    /// </summary>
    public TaskStatus Status { get { return Task.Status; } }

    /// <summary>
    /// Gets whether the task has completed. This property raises a notification when the value changes to <c>true</c>.
    /// </summary>
    public bool IsCompleted { get { return Task.IsCompleted; } }

    /// <summary>
    /// Gets whether the task is busy (not completed). This property raises a notification when the value changes to <c>false</c>.
    /// </summary>
    public bool IsNotCompleted { get { return !Task.IsCompleted; } }

    /// <summary>
    /// Gets whether the task has completed successfully. This property raises a notification when the value changes to <c>true</c>.
    /// </summary>
    public bool IsSuccessfullyCompleted { get { return Task.Status == TaskStatus.RanToCompletion; } }

    /// <summary>
    /// Gets whether the task has been canceled. This property raises a notification only if the task is canceled (i.e., if the value changes to <c>true</c>).
    /// </summary>
    public bool IsCanceled { get { return Task.IsCanceled; } }

    /// <summary>
    /// Gets whether the task has faulted. This property raises a notification only if the task faults (i.e., if the value changes to <c>true</c>).
    /// </summary>
    public bool IsFaulted { get { return Task.IsFaulted; } }

    /// <summary>
    /// Gets the wrapped faulting exception for the task. Returns <c>null</c> if the task is not faulted. This property raises a notification only if the task faults (i.e., if the value changes to non-<c>null</c>).
    /// </summary>
    public AggregateException Exception { get { return Task.Exception; } }

    /// <summary>
    /// Gets the original faulting exception for the task. Returns <c>null</c> if the task is not faulted. This property raises a notification only if the task faults (i.e., if the value changes to non-<c>null</c>).
    /// </summary>
    public Exception InnerException { get { return (Exception == null) ? null : Exception.InnerException; } }

    /// <summary>
    /// Gets the error message for the original faulting exception for the task. Returns <c>null</c> if the task is not faulted. This property raises a notification only if the task faults (i.e., if the value changes to non-<c>null</c>).
    /// </summary>
    public string ErrorMessage { get { return (InnerException == null) ? null : InnerException.Message; } }

    /// <summary>
    /// Event that notifies listeners of property value changes.
    /// </summary>
    public event PropertyChangedEventHandler PropertyChanged;

    /// <summary>
    /// Creates a new task notifier watching the specified task.
    /// </summary>
    /// <param name="task">The task to watch.</param>
    /// <param name="onException">Callback to be run when an error happens</param>
    public static MvxNotifyTask Create(Task task, Action<Exception> onException = null)
    {
        return new MvxNotifyTask(task, onException);
    }

    /// <summary>
    /// Creates a new task notifier watching the specified task.
    /// </summary>
    /// <typeparam name="TResult">The type of the task result.</typeparam>
    /// <param name="task">The task to watch.</param>
    /// <param name="defaultResult">The default "result" value for the task while it is not yet complete.</param>
    /// <param name="onException">Callback to be run when an error happens</param>
    public static MvxNotifyTask<TResult> Create<TResult>(Task<TResult> task, TResult defaultResult = default(TResult), Action<Exception> onException = null)
    {
        return new MvxNotifyTask<TResult>(task, defaultResult, onException);
    }

    /// <summary>
    /// Executes the specified asynchronous code and creates a new task notifier watching the returned task.
    /// </summary>
    /// <param name="asyncAction">The asynchronous code to execute.</param>
    /// <param name="onException">Callback to be run when an error happens</param>
    public static MvxNotifyTask Create(Func<Task> asyncAction, Action<Exception> onException = null)
    {
        return Create(asyncAction(), onException);
    }

    /// <summary>
    /// Executes the specified asynchronous code and creates a new task notifier watching the returned task.
    /// </summary>
    /// <param name="asyncAction">The asynchronous code to execute.</param>
    /// <param name="defaultResult">The default "result" value for the task while it is not yet complete.</param>
    /// <param name="onException">Callback to be run when an error happens</param>
    public static MvxNotifyTask<TResult> Create<TResult>(Func<Task<TResult>> asyncAction, TResult defaultResult = default(TResult), Action<Exception> onException = null)
    {
        return Create(asyncAction(), defaultResult, onException);
    }
}
/// <summary>
/// Watches a task and raises property-changed notifications when the task completes.
/// </summary>
/// <typeparam name="TResult">The type of the task result.</typeparam>
public sealed class MvxNotifyTask<TResult> : INotifyPropertyChanged
{
    /// <summary>
    /// The "result" of the task when it has not yet completed.
    /// </summary>
    private readonly TResult _defaultResult;

    // Callback invoked when the watched task faults; may be null.
    private Action<Exception> _onException;

    /// <summary>
    /// Initializes a task notifier watching the specified task.
    /// </summary>
    /// <param name="task">The task to watch.</param>
    /// <param name="defaultResult">The value to return from <see cref="Result"/> while the task is not yet complete.</param>
    /// <param name="onException">Callback to be run when an error happens</param>
    internal MvxNotifyTask(Task<TResult> task, TResult defaultResult, Action<Exception> onException)
    {
        _defaultResult = defaultResult;
        Task = task;
        _onException = onException;
        // Start observing immediately; TaskCompleted completes once the
        // watched task has settled and notifications have been raised.
        TaskCompleted = MonitorTaskAsync(task);
    }

    private async Task MonitorTaskAsync(Task task)
    {
        try
        {
            // Fully qualified because the `Task` property of this class
            // (of type Task<TResult>) hides the System.Threading.Tasks.Task
            // type name in this scope.
            await System.Threading.Tasks.Task.Yield();
            await task;
        }
        catch(Exception e)
        {
            // Observe the fault and surface it via the callback.
            _onException?.Invoke(e);
        }
        finally
        {
            NotifyProperties(task);
        }
    }

    private void NotifyProperties(Task task)
    {
        // Snapshot the delegate to avoid racing with unsubscription.
        var propertyChanged = PropertyChanged;
        if(propertyChanged == null)
            return;

        // Raise only the properties relevant to how the task finished.
        if(task.IsCanceled)
        {
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("Status"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("IsCanceled"));
        }
        else if(task.IsFaulted)
        {
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("Exception"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("InnerException"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("ErrorMessage"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("Status"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("IsFaulted"));
        }
        else
        {
            // Unlike the non-generic MvxNotifyTask, success also
            // invalidates Result.
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("Result"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("Status"));
            propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("IsSuccessfullyCompleted"));
        }

        // These two change for every outcome.
        propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("IsCompleted"));
        propertyChanged(this, PropertyChangedEventArgsCache.Instance.Get("IsNotCompleted"));
    }

    /// <summary>
    /// Gets the task being watched. This property never changes and is never <c>null</c>.
    /// </summary>
    public Task<TResult> Task { get; private set; }

    /// <summary>
    /// Gets a task that completes successfully when <see cref="Task"/> completes (successfully, faulted, or canceled). This property never changes and is never <c>null</c>.
    /// </summary>
    public Task TaskCompleted { get; private set; }

    /// <summary>
    /// Gets the result of the task. Returns the "default result" value specified in the constructor if the task has not yet completed successfully. This property raises a notification when the task completes successfully.
    /// </summary>
    public TResult Result { get { return (Task.Status == TaskStatus.RanToCompletion) ? Task.Result : _defaultResult; } }

    /// <summary>
    /// Gets the current task status. This property raises a notification when the task completes.
    /// </summary>
    public TaskStatus Status { get { return Task.Status; } }

    /// <summary>
    /// Gets whether the task has completed. This property raises a notification when the value changes to <c>true</c>.
    /// </summary>
    public bool IsCompleted { get { return Task.IsCompleted; } }

    /// <summary>
    /// Gets whether the task is busy (not completed). This property raises a notification when the value changes to <c>false</c>.
    /// </summary>
    public bool IsNotCompleted { get { return !Task.IsCompleted; } }

    /// <summary>
    /// Gets whether the task has completed successfully. This property raises a notification when the value changes to <c>true</c>.
    /// </summary>
    public bool IsSuccessfullyCompleted { get { return Task.Status == TaskStatus.RanToCompletion; } }

    /// <summary>
    /// Gets whether the task has been canceled. This property raises a notification only if the task is canceled (i.e., if the value changes to <c>true</c>).
    /// </summary>
    public bool IsCanceled { get { return Task.IsCanceled; } }

    /// <summary>
    /// Gets whether the task has faulted. This property raises a notification only if the task faults (i.e., if the value changes to <c>true</c>).
    /// </summary>
    public bool IsFaulted { get { return Task.IsFaulted; } }

    /// <summary>
    /// Gets the wrapped faulting exception for the task. Returns <c>null</c> if the task is not faulted. This property raises a notification only if the task faults (i.e., if the value changes to non-<c>null</c>).
    /// </summary>
    public AggregateException Exception { get { return Task.Exception; } }

    /// <summary>
    /// Gets the original faulting exception for the task. Returns <c>null</c> if the task is not faulted. This property raises a notification only if the task faults (i.e., if the value changes to non-<c>null</c>).
    /// </summary>
    public Exception InnerException { get { return (Exception == null) ? null : Exception.InnerException; } }

    /// <summary>
    /// Gets the error message for the original faulting exception for the task. Returns <c>null</c> if the task is not faulted. This property raises a notification only if the task faults (i.e., if the value changes to non-<c>null</c>).
    /// </summary>
    public string ErrorMessage { get { return (InnerException == null) ? null : InnerException.Message; } }

    /// <summary>
    /// Event that notifies listeners of property value changes.
    /// </summary>
    public event PropertyChangedEventHandler PropertyChanged;
}
}
| 1 | 13,846 | Why was this `Task.Yield` removed? It was added in purpose. | MvvmCross-MvvmCross | .cs |
@@ -162,8 +162,13 @@ public abstract class DiscoGapicMethodConfig extends MethodConfig {
}
Iterable<FieldConfig> requiredFieldConfigs =
- DiscoGapicMethodConfig.createFieldNameConfigs(
- DiscoGapicMethodConfig.getRequiredFields(
+ createFieldNameConfigs(
+ diagCollector,
+ messageConfigs,
+ defaultResourceNameTreatment,
+ fieldNamePatterns,
+ resourceNameConfigs,
+ getRequiredFields(
diagCollector, methodModel, methodConfigProto.getRequiredFieldsList()));
Iterable<FieldConfig> optionalFieldConfigs = | 1 | /* Copyright 2017 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.codegen.BatchingConfigProto;
import com.google.api.codegen.FlatteningConfigProto;
import com.google.api.codegen.MethodConfigProto;
import com.google.api.codegen.PageStreamingConfigProto;
import com.google.api.codegen.ReleaseLevel;
import com.google.api.codegen.ResourceNameTreatment;
import com.google.api.codegen.SurfaceTreatmentProto;
import com.google.api.codegen.VisibilityProto;
import com.google.api.codegen.config.GrpcStreamingConfig.GrpcStreamingType;
import com.google.api.codegen.discogapic.transformer.DiscoGapicNamer;
import com.google.api.codegen.discovery.Method;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.tools.framework.model.Diag;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.SimpleLocation;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.Nullable;
import org.threeten.bp.Duration;
/**
 * GapicMethodConfig represents the code-gen config for a Discovery doc method, and includes the
 * specification of features like page streaming and parameter flattening.
 */
@AutoValue
public abstract class DiscoGapicMethodConfig extends MethodConfig {
  /** Discovery-based methods never stream over gRPC. */
  @Override
  public boolean isGrpcStreaming() {
    return false;
  }

  /** No gRPC interface rerouting for Discovery-based methods. */
  @Override
  public String getRerouteToGrpcInterface() {
    return null;
  }

  /* Returns the grpc streaming configuration of the method. */
  @Override
  public GrpcStreamingType getGrpcStreamingType() {
    return GrpcStreamingType.NonStreaming;
  }

  @Nullable
  @Override
  public GrpcStreamingConfig getGrpcStreaming() {
    return null;
  }

  /**
   * Creates an instance of DiscoGapicMethodConfig based on MethodConfigProto, linking it up with
   * the provided method. On errors, null will be returned, and diagnostics are reported to the diag
   * collector.
   */
  @Nullable
  static DiscoGapicMethodConfig createDiscoGapicMethodConfig(
      DiagCollector diagCollector,
      String language,
      MethodConfigProto methodConfigProto,
      Method method,
      ResourceNameMessageConfigs messageConfigs,
      ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
      ImmutableSet<String> retryCodesConfigNames,
      ImmutableSet<String> retryParamsConfigNames,
      DiscoGapicNamer discoGapicNamer) {

    // Errors are accumulated into this flag (with diagnostics added to
    // diagCollector) so that all problems are reported in one pass; the
    // method returns null at the end if anything failed.
    boolean error = false;
    DiscoveryMethodModel methodModel = new DiscoveryMethodModel(method, discoGapicNamer);

    // Page streaming: only configured when the proto carries a
    // non-default PageStreaming message.
    PageStreamingConfig pageStreaming = null;
    if (!PageStreamingConfigProto.getDefaultInstance()
        .equals(methodConfigProto.getPageStreaming())) {
      pageStreaming =
          PageStreamingConfig.createPageStreaming(diagCollector, method, discoGapicNamer);
      if (pageStreaming == null) {
        error = true;
      }
    }

    // Parameter flattening, if configured.
    ImmutableList<FlatteningConfig> flattening = null;
    if (!FlatteningConfigProto.getDefaultInstance().equals(methodConfigProto.getFlattening())) {
      flattening =
          createFlattening(
              diagCollector, messageConfigs, resourceNameConfigs, methodConfigProto, methodModel);
      if (flattening == null) {
        error = true;
      }
    }

    // Request batching, if configured.
    BatchingConfig batching = null;
    if (!BatchingConfigProto.getDefaultInstance().equals(methodConfigProto.getBatching())) {
      batching =
          BatchingConfig.createBatching(
              diagCollector, methodConfigProto.getBatching(), methodModel);
      if (batching == null) {
        error = true;
      }
    }

    // Retry codes/params must refer to names declared elsewhere in the config.
    String retryCodesName = methodConfigProto.getRetryCodesName();
    if (!retryCodesName.isEmpty() && !retryCodesConfigNames.contains(retryCodesName)) {
      diagCollector.addDiag(
          Diag.error(
              SimpleLocation.TOPLEVEL,
              "Retry codes config used but not defined: '%s' (in method %s)",
              retryCodesName,
              methodModel.getFullName()));
      error = true;
    }

    String retryParamsName = methodConfigProto.getRetryParamsName();
    if (!retryParamsConfigNames.isEmpty() && !retryParamsConfigNames.contains(retryParamsName)) {
      diagCollector.addDiag(
          Diag.error(
              SimpleLocation.TOPLEVEL,
              "Retry parameters config used but not defined: %s (in method %s)",
              retryParamsName,
              methodModel.getFullName()));
      error = true;
    }

    // A positive default timeout is mandatory.
    Duration timeout = Duration.ofMillis(methodConfigProto.getTimeoutMillis());
    if (timeout.toMillis() <= 0) {
      diagCollector.addDiag(
          Diag.error(
              SimpleLocation.TOPLEVEL,
              "Default timeout not found or has invalid value (in method %s)",
              methodModel.getFullName()));
      error = true;
    }

    boolean hasRequestObjectMethod = methodConfigProto.getRequestObjectMethod();

    ImmutableMap<String, String> fieldNamePatterns =
        ImmutableMap.copyOf(methodConfigProto.getFieldNamePatterns());

    // Unset resource-name treatment defaults to NONE.
    ResourceNameTreatment defaultResourceNameTreatment =
        methodConfigProto.getResourceNameTreatment();
    if (defaultResourceNameTreatment == null
        || defaultResourceNameTreatment.equals(ResourceNameTreatment.UNSET_TREATMENT)) {
      defaultResourceNameTreatment = ResourceNameTreatment.NONE;
    }

    // Field configs for the required and optional request fields.
    Iterable<FieldConfig> requiredFieldConfigs =
        DiscoGapicMethodConfig.createFieldNameConfigs(
            DiscoGapicMethodConfig.getRequiredFields(
                diagCollector, methodModel, methodConfigProto.getRequiredFieldsList()));

    Iterable<FieldConfig> optionalFieldConfigs =
        DiscoGapicMethodConfig.createFieldNameConfigs(
            DiscoGapicMethodConfig.getOptionalFields(
                methodModel, methodConfigProto.getRequiredFieldsList()));

    // Sample code initializes both the required and explicitly listed fields.
    List<String> sampleCodeInitFields = new ArrayList<>();
    sampleCodeInitFields.addAll(methodConfigProto.getRequiredFieldsList());
    sampleCodeInitFields.addAll(methodConfigProto.getSampleCodeInitFieldsList());

    // Per-language surface treatments can override visibility and release level.
    VisibilityConfig visibility = VisibilityConfig.PUBLIC;
    ReleaseLevel releaseLevel = ReleaseLevel.ALPHA;
    for (SurfaceTreatmentProto treatment : methodConfigProto.getSurfaceTreatmentsList()) {
      if (!treatment.getIncludeLanguagesList().contains(language)) {
        continue;
      }
      if (treatment.getVisibility() != VisibilityProto.UNSET_VISIBILITY) {
        visibility = VisibilityConfig.fromProto(treatment.getVisibility());
      }
      if (treatment.getReleaseLevel() != ReleaseLevel.UNSET_RELEASE_LEVEL) {
        releaseLevel = treatment.getReleaseLevel();
      }
    }

    // No long-running operation config is derived for Discovery methods here.
    LongRunningConfig longRunningConfig = null;

    if (error) {
      return null;
    } else {
      return new AutoValue_DiscoGapicMethodConfig(
          methodModel,
          pageStreaming,
          flattening,
          retryCodesName,
          retryParamsName,
          timeout,
          requiredFieldConfigs,
          optionalFieldConfigs,
          defaultResourceNameTreatment,
          batching,
          hasRequestObjectMethod,
          fieldNamePatterns,
          sampleCodeInitFields,
          visibility,
          releaseLevel,
          longRunningConfig);
    }
  }

  /** Wraps each field model in a default FieldConfig, preserving order. */
  private static Iterable<FieldConfig> createFieldNameConfigs(Iterable<FieldModel> fields) {
    ImmutableList.Builder<FieldConfig> fieldConfigsBuilder = ImmutableList.builder();
    for (FieldModel field : fields) {
      fieldConfigsBuilder.add(FieldConfig.createFieldConfig(field));
    }
    return fieldConfigsBuilder.build();
  }

  /* Return the list of "one of" instances associated with the fields. */
  @Override
  public Iterable<Iterable<String>> getOneofNames(SurfaceNamer namer) {
    return ImmutableList.of();
  }
}
| 1 | 24,047 | You'll also want to do this below, for the `optionalFieldConfigs`. See `GapicMethodConfig.java:180`. | googleapis-gapic-generator | java |
@@ -134,7 +134,8 @@ Status GoExecutor::prepareOver() {
if (clause == nullptr) {
LOG(FATAL) << "Over clause shall never be null";
}
- edge_ = ectx()->schemaManager()->toEdgeType(*clause->edge());
+ auto space = ectx()->rctx()->session()->space();
+ edge_ = ectx()->schemaManager()->toEdgeType(space, *clause->edge());
reversely_ = clause->isReversely();
if (clause->alias() != nullptr) {
expCtx_->addAlias(*clause->alias(), AliasKind::Edge, *clause->edge()); | 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "base/Base.h"
#include "graph/GoExecutor.h"
#include "meta/SchemaManager.h"
#include "dataman/RowReader.h"
#include "dataman/RowSetReader.h"
#include "dataman/ResultSchemaProvider.h"
namespace nebula {
namespace graph {
// Constructs the executor for a GO sentence, keeping a typed pointer
// to the sentence for the prepare/execute phases.
GoExecutor::GoExecutor(Sentence *sentence, ExecutionContext *ectx) : TraverseExecutor(ectx) {
    // The RTTI is guaranteed by Sentence::Kind,
    // so we use `static_cast' instead of `dynamic_cast' for the sake of efficiency.
    sentence_ = static_cast<GoSentence*>(sentence);
}
// Validates and pre-processes every clause of the GO sentence before
// execution, returning the first failure encountered.
//
// The original do { ... } while (false) ladder ended in a redundant
// `if (!status.ok()) return status; return status;` pair; it is
// flattened here into plain early returns with identical behavior.
Status GoExecutor::prepare() {
    DCHECK(sentence_ != nullptr);
    expCtx_ = std::make_unique<ExpressionContext>();

    auto status = checkIfGraphSpaceChosen();
    if (!status.ok()) {
        return status;
    }
    status = prepareStep();
    if (!status.ok()) {
        return status;
    }
    status = prepareFrom();
    if (!status.ok()) {
        return status;
    }
    status = prepareOver();
    if (!status.ok()) {
        return status;
    }
    status = prepareWhere();
    if (!status.ok()) {
        return status;
    }
    status = prepareYield();
    if (!status.ok()) {
        return status;
    }
    return prepareNeededProps();
}
// Entry point of the GO statement: resolve the starting vertices and,
// unless the starting set is empty, launch the first traversal step.
void GoExecutor::execute() {
    FLOG_INFO("Executing Go: %s", sentence_->toString().c_str());
    auto status = setupStarts();
    if (!status.ok()) {
        onError_(std::move(status));
        return;
    }
    // Nothing to traverse from: report an empty (but successful) result.
    if (starts_.empty()) {
        onEmptyInputs();
        return;
    }
    stepOut();
}
// Receives the interim result of the upstream executor in a pipeline;
// consumed later by setupStarts() to derive the starting vertices.
void GoExecutor::feedResult(std::unique_ptr<InterimResult> result) {
    inputs_ = std::move(result);
}
// Parses the optional STEP clause (number of hops). An `UPTO`
// qualifier is recognized but rejected as unsupported.
Status GoExecutor::prepareStep() {
    auto *clause = sentence_->stepClause();
    if (clause != nullptr) {
        steps_ = clause->steps();
        upto_ = clause->isUpto();
    }

    if (isUpto()) {
        return Status::Error("`UPTO' not supported yet");
    }
    return Status::OK();
}
// Parses the FROM clause: either a literal list of vertex ids, or a
// reference (input/variable property) whose column is resolved later
// in setupStarts().
Status GoExecutor::prepareFrom() {
    auto *clause = sentence_->fromClause();
    if (clause == nullptr) {
        LOG(FATAL) << "From clause shall never be null";
    }

    if (!clause->isRef()) {
        // Literal vertex ids given directly in the clause.
        starts_ = clause->srcNodeList()->nodeIds();
        return Status::OK();
    }

    auto *expr = clause->ref();
    if (expr->isInputExpression()) {
        // A column of the piped input.
        auto *iexpr = static_cast<InputPropertyExpression*>(expr);
        colname_ = iexpr->prop();
    } else if (expr->isVariableExpression()) {
        // A column of a named variable.
        auto *vexpr = static_cast<VariablePropertyExpression*>(expr);
        varname_ = vexpr->var();
        colname_ = vexpr->prop();
    } else {
        // No way to happen except memory corruption
        LOG(FATAL) << "Unknown kind of expression";
    }
    return Status::OK();
}
// Parses the OVER clause: resolves the edge name to its numeric type
// and registers the (optional) alias for later expression evaluation.
Status GoExecutor::prepareOver() {
    auto *clause = sentence_->overClause();
    if (clause == nullptr) {
        LOG(FATAL) << "Over clause shall never be null";
    }

    // Edge types are scoped to a graph space, so the lookup must be
    // qualified with the space of the current session (mirrors the
    // space passed to storage in stepOut()).
    auto space = ectx()->rctx()->session()->space();
    edge_ = ectx()->schemaManager()->toEdgeType(space, *clause->edge());
    reversely_ = clause->isReversely();
    if (clause->alias() != nullptr) {
        expCtx_->addAlias(*clause->alias(), AliasKind::Edge, *clause->edge());
    } else {
        expCtx_->addAlias(*clause->edge(), AliasKind::Edge, *clause->edge());
    }

    if (isReversely()) {
        return Status::Error("`REVERSELY' not supported yet");
    }
    return Status::OK();
}
// Parses the optional WHERE clause; the filter expression itself is
// bound and prepared later in prepareNeededProps().
Status GoExecutor::prepareWhere() {
    auto *clause = sentence_->whereClause();
    if (clause != nullptr) {
        filter_ = clause->filter();
    }
    return Status::OK();
}
// Parses the optional YIELD clause; the column expressions are bound
// and prepared later in prepareNeededProps().
Status GoExecutor::prepareYield() {
    auto *clause = sentence_->yieldClause();
    if (clause != nullptr) {
        yields_ = clause->columns();
    }
    return Status::OK();
}
// Binds the filter and yield expressions to the shared
// ExpressionContext and prepares them, so the properties they
// reference can be collected and requested from storage later.
Status GoExecutor::prepareNeededProps() {
    auto status = Status::OK();
    do {
        if (filter_ != nullptr) {
            filter_->setContext(expCtx_.get());
            status = filter_->prepare();
            if (!status.ok()) {
                break;
            }
        }

        if (yields_.empty()) {
            break;
        }
        for (auto *col : yields_) {
            col->expr()->setContext(expCtx_.get());
            status = col->expr()->prepare();
            if (!status.ok()) {
                // Only exits the for-loop; the outer check below
                // propagates the failure out of the do/while.
                break;
            }
        }
        if (!status.ok()) {
            break;
        }
    } while (false);

    return status;
}
// Resolves the starting vertex ids, in priority order:
//   1. literal ids already parsed from the FROM clause;
//   2. a column of a named variable;
//   3. a column of the piped input (if any).
Status GoExecutor::setupStarts() {
    // Literal vertex ids
    if (!starts_.empty()) {
        return Status::OK();
    }
    // Take one column from a variable
    if (varname_ != nullptr) {
        auto *varinput = ectx()->variableHolder()->get(*varname_);
        if (varinput == nullptr) {
            return Status::Error("Variable `%s' not defined", varname_->c_str());
        }
        starts_ = varinput->getVIDs(*colname_);
        return Status::OK();
    }
    // No error happened, but we are having empty inputs
    if (inputs_ == nullptr) {
        return Status::OK();
    }

    // Take one column from the input of the pipe
    DCHECK(colname_ != nullptr);
    starts_ = inputs_->getVIDs(*colname_);
    return Status::OK();
}
// Moves the prepared execution response out to the caller. If no
// response was built (e.g. the result went downstream through
// onResult_ instead), an empty response is handed back.
void GoExecutor::setupResponse(cpp2::ExecutionResponse &resp) {
    if (resp_ == nullptr) {
        resp_ = std::make_unique<cpp2::ExecutionResponse>();
    }
    resp = std::move(*resp_);
}
// Issues one asynchronous `getNeighbors` RPC for the current frontier
// (`starts_`) over the traversal edge, dispatching the continuation on
// this request's runner thread.
void GoExecutor::stepOut() {
    auto space = ectx()->rctx()->session()->space();
    auto returns = getStepOutProps();
    auto future = ectx()->storage()->getNeighbors(space,
                                                  starts_,
                                                  edge_,
                                                  !reversely_,
                                                  "",
                                                  std::move(returns));
    auto *runner = ectx()->rctx()->runner();
    auto cb = [this] (auto &&result) {
        // completeness is the percentage of storage parts that answered.
        auto completeness = result.completeness();
        if (completeness == 0) {
            DCHECK(onError_);
            onError_(Status::Error("Get neighbors failed"));
            return;
        } else if (completeness != 100) {
            // TODO(dutor) We ought to let the user know that the execution was partially
            // performed, even in the case that this happened in the intermediate process.
            // Or, make this case configurable at runtime.
            // For now, we just do some logging and keep going.
            LOG(INFO) << "Get neighbors partially failed: " << completeness << "%";
            for (auto &error : result.failedParts()) {
                LOG(ERROR) << "part: " << error.first
                           << "error code: " << static_cast<int>(error.second);
            }
        }
        onStepOutResponse(std::move(result));
    };
    auto error = [this] (auto &&e) {
        LOG(ERROR) << "Exception caught: " << e.what();
        onError_(Status::Error("Internal error"));
    };
    std::move(future).via(runner).thenValue(cb).thenError(error);
}
// Handles one completed step-out round. On the final step it either
// finishes directly or first fetches destination-vertex properties when
// the query references $$-tag props. On intermediate steps it advances
// the frontier to the newly discovered destinations and recurses.
void GoExecutor::onStepOutResponse(RpcResponse &&rpcResp) {
if (isFinalStep()) {
if (expCtx_->hasDstTagProp()) {
auto dstids = getDstIdsFromResp(rpcResp);
if (dstids.empty()) {
onEmptyInputs();
return;
}
// rpcResp is kept alive inside fetchVertexProps' callback so the
// final result can still be assembled from it.
fetchVertexProps(std::move(dstids), std::move(rpcResp));
return;
}
finishExecution(std::move(rpcResp));
return;
} else {
curStep_++;
starts_ = getDstIdsFromResp(rpcResp);
if (starts_.empty()) {
onEmptyInputs();
return;
}
stepOut();
}
}
// Placeholder for handling a vertex-props response; currently unused.
void GoExecutor::onVertexProps(RpcResponse &&rpcResp) {
UNUSED(rpcResp);
}
// Extracts the distinct destination vertex ids ("_dst" column) from every
// edge row of a step-out response. Used both to advance the frontier and
// to decide which destination vertices need their props fetched.
std::vector<VertexID> GoExecutor::getDstIdsFromResp(RpcResponse &rpcResp) const {
// A set is used for de-duplication; result order is unspecified.
std::unordered_set<VertexID> set;
for (auto &resp : rpcResp.responses()) {
auto *vertices = resp.get_vertices();
if (vertices == nullptr) {
continue;
}
// NOTE(review): resp.edge_schema is used without the
// get_edge_schema() != nullptr check that processFinalResult does —
// presumably storage always sets it when vertices are present; confirm.
auto schema = std::make_shared<ResultSchemaProvider>(resp.edge_schema);
for (auto &vdata : *vertices) {
RowSetReader rsReader(schema, vdata.edge_data);
auto iter = rsReader.begin();
while (iter) {
VertexID dst;
auto rc = iter->getVid("_dst", dst);
CHECK(rc == ResultType::SUCCEEDED);
set.emplace(dst);
++iter;
}
}
}
return std::vector<VertexID>(set.begin(), set.end());
}
// Final stage of the traversal: either forwards an interim result to the
// downstream consumer (pipe), or materializes a client-facing
// ExecutionResponse, then signals completion.
void GoExecutor::finishExecution(RpcResponse &&rpcResp) {
if (onResult_) {
onResult_(setupInterimResult(std::move(rpcResp)));
} else {
resp_ = std::make_unique<cpp2::ExecutionResponse>();
setupResponseHeader(*resp_);
setupResponseBody(rpcResp, *resp_);
}
DCHECK(onFinish_);
onFinish_();
}
// Builds the property list requested from storage on each step-out.
// "_dst" is always needed to advance the frontier; source-tag and edge
// properties are only requested on the final step, where results are
// actually evaluated.
std::vector<storage::cpp2::PropDef> GoExecutor::getStepOutProps() const {
std::vector<storage::cpp2::PropDef> props;
{
storage::cpp2::PropDef pd;
pd.owner = storage::cpp2::PropOwner::EDGE;
pd.name = "_dst";
props.emplace_back(std::move(pd));
}
if (!isFinalStep()) {
return props;
}
for (auto &tagProp : expCtx_->srcTagProps()) {
storage::cpp2::PropDef pd;
pd.owner = storage::cpp2::PropOwner::SOURCE;
pd.name = tagProp.second;
auto tagId = ectx()->schemaManager()->toTagID(tagProp.first);
pd.set_tag_id(tagId);
props.emplace_back(std::move(pd));
}
for (auto &prop : expCtx_->edgeProps()) {
storage::cpp2::PropDef pd;
pd.owner = storage::cpp2::PropOwner::EDGE;
pd.name = prop;
props.emplace_back(std::move(pd));
}
return props;
}
// Builds the property list for the destination-vertex fetch: one PropDef
// per $$-tag property referenced by the query.
std::vector<storage::cpp2::PropDef> GoExecutor::getDstProps() const {
std::vector<storage::cpp2::PropDef> props;
for (auto &tagProp : expCtx_->dstTagProps()) {
storage::cpp2::PropDef pd;
pd.owner = storage::cpp2::PropOwner::DEST;
pd.name = tagProp.second;
auto tagId = ectx()->schemaManager()->toTagID(tagProp.first);
pd.set_tag_id(tagId);
props.emplace_back(std::move(pd));
}
return props;
}
// Fetches the $$-tag properties of the destination vertices `ids`, caching
// them in vertexHolder_, and then finishes execution using the step-out
// response that was captured into the callback.
//
// Fix: the partial-failure log message was copy-pasted from stepOut() and
// wrongly said "Get neighbors"; this is the dest-props fetch.
void GoExecutor::fetchVertexProps(std::vector<VertexID> ids, RpcResponse &&rpcResp) {
    auto space = ectx()->rctx()->session()->space();
    auto returns = getDstProps();
    auto future = ectx()->storage()->getVertexProps(space, ids, returns);
    auto *runner = ectx()->rctx()->runner();
    // `rpcResp` must outlive the async call, so it is moved into the lambda.
    auto cb = [this, stepOutResp = std::move(rpcResp)] (auto &&result) mutable {
        auto completeness = result.completeness();
        if (completeness == 0) {
            DCHECK(onError_);
            onError_(Status::Error("Get dest props failed"));
            return;
        } else if (completeness != 100) {
            // Partial failure: log and keep going with what we got.
            LOG(INFO) << "Get dest props partially failed: " << completeness << "%";
            for (auto &error : result.failedParts()) {
                LOG(ERROR) << "part: " << error.first
                           << "error code: " << static_cast<int>(error.second);
            }
        }
        if (vertexHolder_ == nullptr) {
            vertexHolder_ = std::make_unique<VertexHolder>();
        }
        for (auto &resp : result.responses()) {
            vertexHolder_->add(resp);
        }
        finishExecution(std::move(stepOutResp));
        return;
    };
    auto error = [this] (auto &&e) {
        LOG(ERROR) << "Exception caught: " << e.what();
        onError_(Status::Error("Internal error"));
    };
    std::move(future).via(runner).thenValue(cb).thenError(error);
}
// Returns the output column names: the user-supplied alias when present,
// otherwise the textual form of the YIELD expression.
std::vector<std::string> GoExecutor::getResultColumnNames() const {
    std::vector<std::string> names;
    names.reserve(yields_.size());
    for (auto *col : yields_) {
        auto *alias = col->alias();
        names.emplace_back(alias != nullptr ? *alias : col->expr()->toString());
    }
    return names;
}
// Converts the final step-out response into an InterimResult for a
// downstream executor. The schema is inferred lazily from the first
// record's runtime types; subsequent records are assumed to match.
// Returns nullptr when no record was produced.
std::unique_ptr<InterimResult> GoExecutor::setupInterimResult(RpcResponse &&rpcResp) {
// Generic results
std::shared_ptr<SchemaWriter> schema;
std::unique_ptr<RowSetWriter> rsWriter;
using nebula::cpp2::SupportedType;
auto cb = [&] (std::vector<VariantType> record) {
// First record: derive the schema from the variant tags.
if (schema == nullptr) {
schema = std::make_shared<SchemaWriter>();
auto colnames = getResultColumnNames();
for (auto i = 0u; i < record.size(); i++) {
SupportedType type;
switch (record[i].which()) {
case 0:
// all integers in InterimResult are regarded as type of VID
type = SupportedType::VID;
break;
case 1:
type = SupportedType::DOUBLE;
break;
case 2:
type = SupportedType::BOOL;
break;
case 3:
type = SupportedType::STRING;
break;
default:
LOG(FATAL) << "Unknown VariantType: " << record[i].which();
}
schema->appendCol(colnames[i], type);
} // for
rsWriter = std::make_unique<RowSetWriter>(schema);
} // if
// Serialize the record; variant tag order must match the schema above.
RowWriter writer(schema);
for (auto &column : record) {
switch (column.which()) {
case 0:
writer << boost::get<int64_t>(column);
break;
case 1:
writer << boost::get<double>(column);
break;
case 2:
writer << boost::get<bool>(column);
break;
case 3:
writer << boost::get<std::string>(column);
break;
default:
LOG(FATAL) << "Unknown VariantType: " << column.which();
}
}
rsWriter->addRow(writer);
}; // cb
processFinalResult(rpcResp, cb);
// No results populated
if (rsWriter == nullptr) {
return nullptr;
}
return std::make_unique<InterimResult>(std::move(rsWriter));
}
// Fills in the response's column-name header from the YIELD clause.
void GoExecutor::setupResponseHeader(cpp2::ExecutionResponse &resp) const {
resp.set_column_names(getResultColumnNames());
}
// Reads property `prop` from `reader` according to its declared type in
// `schema` and returns it as a VariantType (FLOAT is widened to double).
// NOTE(review): the reader->getXxx return codes are ignored here —
// presumably a schema-declared field always decodes; confirm.
VariantType getProp(const std::string &prop,
const RowReader *reader,
ResultSchemaProvider *schema) {
using nebula::cpp2::SupportedType;
auto type = schema->getFieldType(prop).type;
switch (type) {
case SupportedType::BOOL: {
bool v;
reader->getBool(prop, v);
return v;
}
case SupportedType::INT: {
int64_t v;
reader->getInt(prop, v);
return v;
}
case SupportedType::VID: {
VertexID v;
reader->getVid(prop, v);
return v;
}
case SupportedType::FLOAT: {
float v;
reader->getFloat(prop, v);
return static_cast<double>(v);
}
case SupportedType::DOUBLE: {
double v;
reader->getDouble(prop, v);
return v;
}
case SupportedType::STRING: {
folly::StringPiece v;
reader->getString(prop, v);
return v.toString();
}
default:
LOG(FATAL) << "Unknown type: " << static_cast<int32_t>(type);
return "";
}
}
// Converts each final record (a vector of variants) into a cpp2::RowValue
// and installs the resulting rows on the client-facing response.
void GoExecutor::setupResponseBody(RpcResponse &rpcResp, cpp2::ExecutionResponse &resp) const {
std::vector<cpp2::RowValue> rows;
auto cb = [&] (std::vector<VariantType> record) {
std::vector<cpp2::ColumnValue> row;
row.reserve(record.size());
for (auto &column : record) {
row.emplace_back();
// Variant tag -> thrift ColumnValue setter.
switch (column.which()) {
case 0:
row.back().set_integer(boost::get<int64_t>(column));
break;
case 1:
row.back().set_double_precision(boost::get<double>(column));
break;
case 2:
row.back().set_bool_val(boost::get<bool>(column));
break;
case 3:
row.back().set_str(boost::get<std::string>(column));
break;
default:
LOG(FATAL) << "Unknown VariantType: " << column.which();
}
}
rows.emplace_back();
rows.back().set_columns(std::move(row));
};
processFinalResult(rpcResp, cb);
resp.set_rows(std::move(rows));
}
// Terminates the traversal early when the frontier becomes empty:
// hands a null interim result downstream, or ensures an empty client
// response exists, then signals completion.
void GoExecutor::onEmptyInputs() {
if (onResult_) {
onResult_(nullptr);
} else if (resp_ == nullptr) {
resp_ = std::make_unique<cpp2::ExecutionResponse>();
}
onFinish_();
}
// Walks every edge row of the final step-out response, wires the property
// getters (src tag / edge / dst tag) into the expression context, applies
// the optional WHERE filter, evaluates each YIELD column, and invokes
// `cb` once per surviving row.
void GoExecutor::processFinalResult(RpcResponse &rpcResp, Callback cb) const {
auto all = rpcResp.responses();
for (auto &resp : all) {
if (resp.get_vertices() == nullptr) {
continue;
}
std::shared_ptr<ResultSchemaProvider> vschema;
std::shared_ptr<ResultSchemaProvider> eschema;
if (resp.get_vertex_schema() != nullptr) {
vschema = std::make_shared<ResultSchemaProvider>(resp.vertex_schema);
}
if (resp.get_edge_schema() != nullptr) {
eschema = std::make_shared<ResultSchemaProvider>(resp.edge_schema);
}
for (auto &vdata : resp.vertices) {
std::unique_ptr<RowReader> vreader;
if (vschema != nullptr) {
DCHECK(vdata.__isset.vertex_data);
vreader = RowReader::getRowReader(vdata.vertex_data, vschema);
}
DCHECK(vdata.__isset.edge_data);
DCHECK(eschema != nullptr);
RowSetReader rsReader(eschema, vdata.edge_data);
auto iter = rsReader.begin();
while (iter) {
// The getters capture `iter`/`vreader` by reference; they are only
// valid for the duration of this loop iteration.
auto &getters = expCtx_->getters();
getters.getEdgeProp = [&] (const std::string &prop) -> VariantType {
return getProp(prop, &*iter, eschema.get());
};
getters.getSrcTagProp = [&] (const std::string&, const std::string &prop) {
return getProp(prop, vreader.get(), vschema.get());
};
getters.getDstTagProp = [&] (const std::string&, const std::string &prop) {
// Destination props were pre-fetched into vertexHolder_.
auto dst = getProp("_dst", &*iter, eschema.get());
return vertexHolder_->get(boost::get<int64_t>(dst), prop);
};
// Evaluate filter
if (filter_ != nullptr) {
auto value = filter_->eval();
if (!Expression::asBool(value)) {
++iter;
continue;
}
}
std::vector<VariantType> record;
record.reserve(yields_.size());
for (auto *column : yields_) {
auto *expr = column->expr();
// TODO(dutor) `eval' may fail
auto value = expr->eval();
record.emplace_back(std::move(value));
}
cb(std::move(record));
++iter;
} // while `iter'
} // for `vdata'
} // for `resp'
}
// Looks up property `prop` of cached vertex `id`. The vertex must have
// been added beforehand; a miss is currently a hard CHECK failure.
VariantType GoExecutor::VertexHolder::get(VertexID id, const std::string &prop) const {
DCHECK(schema_ != nullptr);
auto iter = data_.find(id);
// TODO(dutor) We need a type to represent NULL or non-existing prop
CHECK(iter != data_.end());
auto reader = RowReader::getRowReader(iter->second, schema_);
return getProp(prop, reader.get(), schema_.get());
}
// Caches the vertex data from one storage response. The schema is taken
// from the first response that carries one; responses without vertices or
// without a vertex schema are ignored.
void GoExecutor::VertexHolder::add(const storage::cpp2::QueryResponse &resp) {
auto *vertices = resp.get_vertices();
if (vertices == nullptr) {
return;
}
if (resp.get_vertex_schema() == nullptr) {
return;
}
if (schema_ == nullptr) {
schema_ = std::make_shared<ResultSchemaProvider>(resp.vertex_schema);
}
for (auto &vdata : *vertices) {
DCHECK(vdata.__isset.vertex_data);
data_[vdata.vertex_id] = vdata.vertex_data;
}
}
} // namespace graph
} // namespace nebula
| 1 | 16,349 | We'd better use "spaceId" as variable name here. Because i can't figure out the really type for "space" at the first glimpse due to "auto" used. | vesoft-inc-nebula | cpp |
@@ -37,6 +37,7 @@ partial class Build
AbsolutePath ArtifactsDirectory => Artifacts ?? (OutputDirectory / "artifacts");
AbsolutePath WindowsTracerHomeZip => ArtifactsDirectory / "windows-tracer-home.zip";
AbsolutePath BuildDataDirectory => RootDirectory / "build_data";
+ AbsolutePath LibSqreenDirectory => RootDirectory / "packages" / "libsqreen.1.1.2.2";
AbsolutePath SourceDirectory => RootDirectory / "src";
AbsolutePath TestsDirectory => RootDirectory / "test"; | 1 | using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text.RegularExpressions;
using Nuke.Common;
using Nuke.Common.IO;
using Nuke.Common.ProjectModel;
using Nuke.Common.Tooling;
using Nuke.Common.Tools.DotNet;
using Nuke.Common.Tools.MSBuild;
using Nuke.Common.Tools.NuGet;
using Nuke.Common.Utilities.Collections;
using static Nuke.Common.EnvironmentInfo;
using static Nuke.Common.IO.FileSystemTasks;
using static Nuke.Common.IO.PathConstruction;
using static Nuke.Common.IO.CompressionTasks;
using static Nuke.Common.Tools.DotNet.DotNetTasks;
using static Nuke.Common.Tools.MSBuild.MSBuildTasks;
using static CustomDotNetTasks;
// #pragma warning disable SA1306
// #pragma warning disable SA1134
// #pragma warning disable SA1111
// #pragma warning disable SA1400
// #pragma warning disable SA1401
partial class Build
{
// The solution under build, injected by Nuke.
[Solution("Datadog.Trace.sln")] readonly Solution Solution;
// Entry-point MSBuild project that defines the custom build targets.
AbsolutePath MsBuildProject => RootDirectory / "Datadog.Trace.proj";
// Output locations; the first three can be overridden via parameters.
AbsolutePath OutputDirectory => RootDirectory / "bin";
AbsolutePath TracerHomeDirectory => TracerHome ?? (OutputDirectory / "tracer-home");
AbsolutePath DDTracerHomeDirectory => DDTracerHome ?? (OutputDirectory / "dd-tracer-home");
AbsolutePath ArtifactsDirectory => Artifacts ?? (OutputDirectory / "artifacts");
AbsolutePath WindowsTracerHomeZip => ArtifactsDirectory / "windows-tracer-home.zip";
AbsolutePath BuildDataDirectory => RootDirectory / "build_data";
AbsolutePath SourceDirectory => RootDirectory / "src";
AbsolutePath TestsDirectory => RootDirectory / "test";
// OS-dependent scratch and tracer-log locations.
string TempDirectory => IsWin ? Path.GetTempPath() : "/tmp/";
string TracerLogDirectory => IsWin
? Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData),
"Datadog .NET Tracer", "logs")
: "/var/log/datadog/dotnet/";
Project NativeProfilerProject => Solution.GetProject(Projects.ClrProfilerNative);
// Lazily-resolved external tools (resolved from PATH on first use).
[LazyPathExecutable(name: "cmake")] readonly Lazy<Tool> CMake;
[LazyPathExecutable(name: "make")] readonly Lazy<Tool> Make;
[LazyPathExecutable(name: "fpm")] readonly Lazy<Tool> Fpm;
[LazyPathExecutable(name: "gzip")] readonly Lazy<Tool> GZip;
[LazyPathExecutable(name: "cmd")] readonly Lazy<Tool> Cmd;
// An x64 host also builds x86; an x86 host builds only x86.
IEnumerable<MSBuildTargetPlatform> ArchitecturesForPlatform =>
Equals(Platform, MSBuildTargetPlatform.x64)
? new[] { MSBuildTargetPlatform.x64, MSBuildTargetPlatform.x86 }
: new[] { MSBuildTargetPlatform.x86 };
bool IsArm64 => RuntimeInformation.ProcessArchitecture == Architecture.Arm64;
string LinuxArchitectureIdentifier => IsArm64 ? "arm64" : Platform.ToString();
// Alpine (musl) only ships a tarball; glibc distros also get deb/rpm.
IEnumerable<string> LinuxPackageTypes => IsAlpine ? new[] { "tar" } : new[] { "deb", "rpm", "tar" };
// Projects published as NuGet packages.
IEnumerable<Project> ProjectsToPack => new[]
{
Solution.GetProject(Projects.DatadogTrace),
Solution.GetProject(Projects.DatadogTraceOpenTracing),
};
// Integration-test projects that may run in parallel vs. serially.
Project[] ParallelIntegrationTests => new[]
{
Solution.GetProject(Projects.TraceIntegrationTests),
Solution.GetProject(Projects.OpenTracingIntegrationTests),
};
Project[] ClrProfilerIntegrationTests => new[]
{
Solution.GetProject(Projects.ClrProfilerIntegrationTests)
};
// Frameworks the managed profiler is published for (net4x is Windows-only).
readonly IEnumerable<TargetFramework> TargetFrameworks = new[]
{
TargetFramework.NET45,
TargetFramework.NET461,
TargetFramework.NETSTANDARD2_0,
TargetFramework.NETCOREAPP3_1,
};
// Ensures all output directories exist before any build step runs.
Target CreateRequiredDirectories => _ => _
.Unlisted()
.Executes(() =>
{
EnsureExistingDirectory(TracerHomeDirectory);
EnsureExistingDirectory(ArtifactsDirectory);
EnsureExistingDirectory(DDTracerHomeDirectory);
EnsureExistingDirectory(BuildDataDirectory);
});
// Restores NuGet packages. Windows uses NuGet.exe (needed for the
// non-SDK .NET Framework projects); other platforms use dotnet restore.
Target Restore => _ => _
.After(Clean)
.Unlisted()
.Executes(() =>
{
if (IsWin)
{
NuGetTasks.NuGetRestore(s => s
.SetTargetPath(Solution)
.SetVerbosity(NuGetVerbosity.Normal)
.When(!string.IsNullOrEmpty(NugetPackageDirectory), o =>
o.SetPackagesDirectory(NugetPackageDirectory)));
}
else
{
DotNetRestore(s => s
.SetProjectFile(Solution)
.SetVerbosity(DotNetVerbosity.Normal)
// .SetTargetPlatform(Platform) // necessary to ensure we restore every project
.SetProperty("configuration", BuildConfiguration.ToString())
.When(!string.IsNullOrEmpty(NugetPackageDirectory), o =>
o.SetPackageDirectory(NugetPackageDirectory)));
}
});
// Compiles the native (C++) profiler source on Windows for every supported
// architecture. Must use the Visual Studio MSBuild rather than dotnet msbuild.
Target CompileNativeSrcWindows => _ => _
.Unlisted()
.After(CompileManagedSrc)
.OnlyWhenStatic(() => IsWin)
.Executes(() =>
{
// Consistency fix: reuse the shared ArchitecturesForPlatform property
// (x64 hosts also build x86) instead of duplicating its computation here.
// Can't use dotnet msbuild, as needs to use the VS version of MSBuild
MSBuild(s => s
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.SetMSBuildPath()
.SetTargets("BuildCppSrc")
.DisableRestore()
.SetMaxCpuCount(null)
.CombineWith(ArchitecturesForPlatform, (m, platform) => m
.SetTargetPlatform(platform)));
});
// Compiles the native profiler on Linux via out-of-tree CMake + make.
Target CompileNativeSrcLinux => _ => _
.Unlisted()
.After(CompileManagedSrc)
.OnlyWhenStatic(() => IsLinux)
.Executes(() =>
{
var buildDirectory = NativeProfilerProject.Directory / "build";
EnsureExistingDirectory(buildDirectory);
CMake.Value(
arguments: "../ -DCMAKE_BUILD_TYPE=Release",
workingDirectory: buildDirectory);
Make.Value(workingDirectory: buildDirectory);
});
// Compiles the native profiler on macOS via in-tree CMake + make.
Target CompileNativeSrcMacOs => _ => _
.Unlisted()
.After(CompileManagedSrc)
.OnlyWhenStatic(() => IsOsx)
.Executes(() =>
{
var nativeProjectDirectory = NativeProfilerProject.Directory;
CMake.Value(arguments: ".", workingDirectory: nativeProjectDirectory);
Make.Value(workingDirectory: nativeProjectDirectory);
});
// Aggregate: dispatches to the per-OS native compile target.
Target CompileNativeSrc => _ => _
.Unlisted()
.Description("Compiles the native loader")
.DependsOn(CompileNativeSrcWindows)
.DependsOn(CompileNativeSrcMacOs)
.DependsOn(CompileNativeSrcLinux);
// Compiles the managed (C#) src projects; always built AnyCPU.
Target CompileManagedSrc => _ => _
.Unlisted()
.Description("Compiles the managed code in the src directory")
.After(CreateRequiredDirectories)
.After(Restore)
.Executes(() =>
{
// Always AnyCPU
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetTargetPlatformAnyCPU()
.SetConfiguration(BuildConfiguration)
.DisableRestore()
.SetTargets("BuildCsharpSrc")
);
});
// Compiles the native (C++) unit tests on Windows for every supported
// architecture, using the Visual Studio MSBuild.
Target CompileNativeTestsWindows => _ => _
.Unlisted()
.After(CompileNativeSrc)
.OnlyWhenStatic(() => IsWin)
.Executes(() =>
{
// Consistency fix: reuse the shared ArchitecturesForPlatform property
// (x64 hosts also build x86) instead of duplicating its computation here.
// Can't use dotnet msbuild, as needs to use the VS version of MSBuild
MSBuild(s => s
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.SetMSBuildPath()
.SetTargets("BuildCppTests")
.DisableRestore()
.SetMaxCpuCount(null)
.CombineWith(ArchitecturesForPlatform, (m, platform) => m
.SetTargetPlatform(platform)));
});
// Native unit tests are not yet supported on Linux; logs and moves on.
Target CompileNativeTestsLinux => _ => _
.Unlisted()
.After(CompileNativeSrc)
.OnlyWhenStatic(() => IsLinux)
.Executes(() =>
{
Logger.Error("We don't currently run unit tests on Linux");
});
// Aggregate: dispatches to the per-OS native-test compile target.
Target CompileNativeTests => _ => _
.Unlisted()
.Description("Compiles the native loader unit tests")
.DependsOn(CompileNativeTestsWindows)
.DependsOn(CompileNativeTestsLinux);
// Copies integrations.json into the tracer home directory.
Target CopyIntegrationsJson => _ => _
.Unlisted()
.After(Clean)
.After(CreateRequiredDirectories)
.Executes(() =>
{
var source = RootDirectory / "integrations.json";
var dest = TracerHomeDirectory;
Logger.Info($"Copying '{source}' to '{dest}'");
CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite);
});
// Publishes the managed profiler into per-framework folders of the tracer
// home; net4x frameworks are skipped on non-Windows hosts.
Target PublishManagedProfiler => _ => _
.Unlisted()
.After(CompileManagedSrc)
.Executes(() =>
{
var targetFrameworks = IsWin
? TargetFrameworks
: TargetFrameworks.Where(framework => !framework.ToString().StartsWith("net4"));
DotNetPublish(s => s
.SetProject(Solution.GetProject(Projects.ClrProfilerManaged))
.SetConfiguration(BuildConfiguration)
.SetTargetPlatformAnyCPU()
.EnableNoBuild()
.EnableNoRestore()
.CombineWith(targetFrameworks, (p, framework) => p
.SetFramework(framework)
.SetOutput(TracerHomeDirectory / framework)));
});
// Copies the Windows native profiler DLL(s) into win-<arch> folders.
Target PublishNativeProfilerWindows => _ => _
.Unlisted()
.OnlyWhenStatic(() => IsWin)
.After(CompileNativeSrc, PublishManagedProfiler)
.Executes(() =>
{
foreach (var architecture in ArchitecturesForPlatform)
{
var source = NativeProfilerProject.Directory / "bin" / BuildConfiguration / architecture.ToString() /
$"{NativeProfilerProject.Name}.dll";
var dest = TracerHomeDirectory / $"win-{architecture}";
Logger.Info($"Copying '{source}' to '{dest}'");
CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite);
}
});
// Copies the Linux .so and the log-path helper script into the tracer home.
Target PublishNativeProfilerLinux => _ => _
.Unlisted()
.OnlyWhenStatic(() => IsLinux)
.After(CompileNativeSrc, PublishManagedProfiler)
.Executes(() =>
{
// copy createLogPath.sh
CopyFileToDirectory(
RootDirectory / "build" / "artifacts" / "createLogPath.sh",
TracerHomeDirectory,
FileExistsPolicy.Overwrite);
// Copy Native file
CopyFileToDirectory(
NativeProfilerProject.Directory / "build" / "bin" / $"{NativeProfilerProject.Name}.so",
TracerHomeDirectory,
FileExistsPolicy.Overwrite);
});
// Copies the macOS .dylib and the log-path helper script into the tracer home.
Target PublishNativeProfilerMacOs => _ => _
.Unlisted()
.OnlyWhenStatic(() => IsOsx)
.After(CompileNativeSrc, PublishManagedProfiler)
.Executes(() =>
{
// copy createLogPath.sh
CopyFileToDirectory(
RootDirectory / "build" / "artifacts" / "createLogPath.sh",
TracerHomeDirectory,
FileExistsPolicy.Overwrite);
// Create home directory
CopyFileToDirectory(
NativeProfilerProject.Directory / "bin" / $"{NativeProfilerProject.Name}.dylib",
TracerHomeDirectory,
FileExistsPolicy.Overwrite);
});
// Aggregate: dispatches to the per-OS native publish target.
Target PublishNativeProfiler => _ => _
.Unlisted()
.DependsOn(PublishNativeProfilerWindows)
.DependsOn(PublishNativeProfilerLinux)
.DependsOn(PublishNativeProfilerMacOs);
// Builds the dd-tracer-home layout: a copy of the tracer home, with the
// native library moved into an architecture-specific subfolder on
// non-Windows platforms (Windows already has the expected layout).
Target CreateDdTracerHome => _ => _
.Unlisted()
.After(PublishNativeProfiler, CopyIntegrationsJson, PublishManagedProfiler)
.Executes(() =>
{
// start by copying everything from the tracer home dir
CopyDirectoryRecursively(TracerHomeDirectory, DDTracerHomeDirectory, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite);
if (IsWin)
{
// windows already has the expected layout
return;
}
// Move the native file to the architecture-specific folder
var (architecture, fileName) = IsOsx
? ("osx-x64", $"{NativeProfilerProject.Name}.dylib")
: ($"linux-{LinuxArchitectureIdentifier}", $"{NativeProfilerProject.Name}.so");
var outputDir = DDTracerHomeDirectory / architecture;
EnsureCleanDirectory(outputDir);
MoveFile(
DDTracerHomeDirectory / fileName,
outputDir / fileName);
});
// Builds the Windows MSI installers, one per architecture, in parallel.
Target BuildMsi => _ => _
.Unlisted()
.Description("Builds the .msi files from the compiled tracer home directory")
.After(BuildTracerHome)
.OnlyWhenStatic(() => IsWin)
.Executes(() =>
{
MSBuild(s => s
.SetTargetPath(Solution.GetProject(Projects.WindowsInstaller))
.SetConfiguration(BuildConfiguration)
.SetMSBuildPath()
.AddProperty("RunWixToolsOutOfProc", true)
.SetProperty("TracerHomeDirectory", TracerHomeDirectory)
.SetMaxCpuCount(null)
.CombineWith(ArchitecturesForPlatform, (o, arch) => o
.SetProperty("MsiOutputPath", ArtifactsDirectory / arch.ToString())
.SetTargetPlatform(arch)),
degreeOfParallelism: 2);
});
/// <summary>
/// This target is a bit of a hack, but means that we actually use the All CPU builds in intgration tests etc
/// </summary>
Target CreatePlatformlessSymlinks => _ => _
.Description("Copies the build output from 'All CPU' platforms to platform-specific folders")
.Unlisted()
.After(CompileManagedSrc)
.After(CompileDependencyLibs)
.After(CompileManagedTestHelpers)
.Executes(() =>
{
// create junction for each directory
var directories = RootDirectory.GlobDirectories(
$"src/**/bin/{BuildConfiguration}",
$"tools/**/bin/{BuildConfiguration}",
$"test/Datadog.Trace.TestHelpers/**/bin/{BuildConfiguration}",
$"test/test-applications/integrations/dependency-libs/**/bin/{BuildConfiguration}"
);
directories.ForEach(existingDir =>
{
// NTFS junction: <parent>/<Platform>/<Configuration> -> <parent>/<Configuration>
var newDir = existingDir.Parent / $"{Platform}" / BuildConfiguration;
if (DirectoryExists(newDir))
{
Logger.Info($"Skipping '{newDir}' as already exists");
}
else
{
EnsureExistingDirectory(newDir.Parent);
Cmd.Value(arguments: $"cmd /c mklink /J \"{newDir}\" \"{existingDir}\"");
}
});
});
// Packages the tracer home: a zip on Windows; deb/rpm/tar.gz (via fpm)
// on Linux, with a musl-specific name on Alpine and an arch suffix on
// non-x64 hosts.
Target ZipTracerHome => _ => _
.Unlisted()
.After(BuildTracerHome)
.Requires(() => Version)
.Executes(() =>
{
if (IsWin)
{
CompressZip(TracerHomeDirectory, WindowsTracerHomeZip, fileMode: FileMode.Create);
}
else if (IsLinux)
{
var fpm = Fpm.Value;
var gzip = GZip.Value;
var packageName = "datadog-dotnet-apm";
var workingDirectory = ArtifactsDirectory / $"linux-{LinuxArchitectureIdentifier}";
EnsureCleanDirectory(workingDirectory);
foreach (var packageType in LinuxPackageTypes)
{
var args = new[]
{
"-f",
"-s dir",
$"-t {packageType}",
$"-n {packageName}",
$"-v {Version}",
packageType == "tar" ? "" : "--prefix /opt/datadog",
$"--chdir {TracerHomeDirectory}",
"netstandard2.0/",
"netcoreapp3.1/",
"Datadog.Trace.ClrProfiler.Native.so",
"integrations.json",
"createLogPath.sh",
};
var arguments = string.Join(" ", args);
fpm(arguments, workingDirectory: workingDirectory);
}
gzip($"-f {packageName}.tar", workingDirectory: workingDirectory);
var suffix = RuntimeInformation.ProcessArchitecture == Architecture.X64
? string.Empty
: $".{RuntimeInformation.ProcessArchitecture.ToString().ToLower()}";
var versionedName = IsAlpine
? $"{packageName}-{Version}-musl{suffix}.tar.gz"
: $"{packageName}-{Version}{suffix}.tar.gz";
RenameFile(
workingDirectory / $"{packageName}.tar.gz",
workingDirectory / versionedName);
}
});
// Compiles the shared managed test-helper projects (AnyCPU, no project refs).
Target CompileManagedTestHelpers => _ => _
.Unlisted()
.After(Restore)
.After(CompileManagedSrc)
.Executes(() =>
{
// Always AnyCPU
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatformAnyCPU()
.DisableRestore()
.SetProperty("BuildProjectReferences", false)
.SetTargets("BuildCsharpTestHelpers"));
});
// Compiles the managed unit-test projects (AnyCPU, no project refs).
Target CompileManagedUnitTests => _ => _
.Unlisted()
.After(Restore)
.After(CompileManagedSrc)
.Executes(() =>
{
// Always AnyCPU
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatformAnyCPU()
.DisableRestore()
.SetProperty("BuildProjectReferences", false)
.SetTargets("BuildCsharpUnitTests"));
});
// Runs every *.Tests.csproj under test/ with dotnet test; trx logs and
// memory dumps are collected, and logs are archived even on failure.
Target RunManagedUnitTests => _ => _
.Unlisted()
.After(CompileManagedUnitTests)
.Executes(() =>
{
var testProjects = RootDirectory.GlobFiles("test/**/*.Tests.csproj")
.Select(x => Solution.GetProject(x))
.ToList();
testProjects.ForEach(EnsureResultsDirectory);
try
{
DotNetTest(x => x
.EnableNoRestore()
.EnableNoBuild()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatformAnyCPU()
.SetDDEnvironmentVariables("dd-tracer-dotnet")
.EnableMemoryDumps()
.When(CodeCoverage, ConfigureCodeCoverage)
.CombineWith(testProjects, (x, project) => x
.EnableTrxLogOutput(GetResultsDirectory(project))
.SetProjectFile(project)));
}
finally
{
// Always archive logs, even when tests fail.
MoveLogsToBuildData();
}
});
// Runs the compiled native (gtest) unit tests on Windows.
Target RunNativeTestsWindows => _ => _
.Unlisted()
.After(CompileNativeSrcWindows)
.After(CompileNativeTestsWindows)
.OnlyWhenStatic(() => IsWin)
.Executes(() =>
{
var workingDirectory = TestsDirectory / "Datadog.Trace.ClrProfiler.Native.Tests" / "bin" / BuildConfiguration.ToString() / Platform.ToString();
var exePath = workingDirectory / "Datadog.Trace.ClrProfiler.Native.Tests.exe";
var testExe = ToolResolver.GetLocalTool(exePath);
testExe("--gtest_output=xml", workingDirectory: workingDirectory);
});
// Native unit tests are not yet supported on Linux; logs and moves on.
Target RunNativeTestsLinux => _ => _
.Unlisted()
.After(CompileNativeSrcLinux)
.After(CompileNativeTestsLinux)
.OnlyWhenStatic(() => IsLinux)
.Executes(() =>
{
Logger.Error("We don't currently run unit tests on Linux");
});
// Aggregate: dispatches to the per-OS native-test run target.
Target RunNativeTests => _ => _
.Unlisted()
.DependsOn(RunNativeTestsWindows)
.DependsOn(RunNativeTestsLinux);
// Compiles sample dependency libraries used by integration tests (AnyCPU).
Target CompileDependencyLibs => _ => _
.Unlisted()
.After(Restore)
.After(CompileManagedSrc)
.Executes(() =>
{
// Always AnyCPU
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatformAnyCPU()
.DisableRestore()
.EnableNoDependencies()
.SetTargets("BuildDependencyLibs")
);
});
// Compiles dependency libraries for the regression tests; Linux builds
// use MSIL (AnyCPU) while Windows builds use the selected architecture.
Target CompileRegressionDependencyLibs => _ => _
.Unlisted()
.After(Restore)
.After(CompileManagedSrc)
.Executes(() =>
{
// We run linux integration tests in AnyCPU, but Windows on the specific architecture
var platform = IsLinux ? MSBuildTargetPlatform.MSIL : Platform;
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetTargetPlatformAnyCPU()
.DisableRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(platform)
.SetTargets("BuildRegressionDependencyLibs")
);
});
// Builds the regression sample apps (excluding a few known-problematic
// projects); restore is deliberately left enabled to avoid runtime
// identifier issues in some target frameworks.
Target CompileRegressionSamples => _ => _
.Unlisted()
.After(Restore)
.After(CreatePlatformlessSymlinks)
.After(CompileRegressionDependencyLibs)
.Executes(() =>
{
// explicitly build the other dependency (with restore to avoid runtime identifier dependency issues)
DotNetBuild(x => x
.SetProjectFile(Solution.GetProject(Projects.ApplicationWithLog4Net))
// .EnableNoRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(Platform)
.SetNoWarnDotNetCore3()
.When(!string.IsNullOrEmpty(NugetPackageDirectory), o =>
o.SetPackageDirectory(NugetPackageDirectory)));
var regressionsDirectory = Solution.GetProject(Projects.EntityFramework6xMdTokenLookupFailure)
.Directory.Parent;
var regressionLibs = GlobFiles(regressionsDirectory / "**" / "*.csproj")
.Where(x => !x.Contains("EntityFramework6x.MdTokenLookupFailure")
&& !x.Contains("ExpenseItDemo")
&& !x.Contains("StackExchange.Redis.AssemblyConflict.LegacyProject")
&& !x.Contains("dependency-libs"));
// Allow restore here, otherwise things go wonky with runtime identifiers
// in some target frameworks. No, I don't know why
DotNetBuild(x => x
// .EnableNoRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(Platform)
.SetNoWarnDotNetCore3()
.When(!string.IsNullOrEmpty(NugetPackageDirectory), o =>
o.SetPackageDirectory(NugetPackageDirectory))
.CombineWith(regressionLibs, (x, project) => x
.SetProjectFile(project)));
});
// Builds the legacy .NET Framework (non-SDK) reproduction projects;
// requires the full VS MSBuild so EDMX assets are embedded correctly.
Target CompileFrameworkReproductions => _ => _
.Unlisted()
.Description("Builds .NET Framework projects (non SDK-based projects)")
.After(CompileRegressionDependencyLibs)
.After(CompileDependencyLibs)
.After(CreatePlatformlessSymlinks)
.Requires(() => IsWin)
.Executes(() =>
{
// We have to use the full MSBuild here, as dotnet msbuild doesn't copy the EDMX assets for embedding correctly
// seems similar to https://github.com/dotnet/sdk/issues/8360
MSBuild(s => s
.SetTargetPath(MsBuildProject)
.SetMSBuildPath()
.DisableRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(Platform)
.SetTargets("BuildFrameworkReproductions")
.SetMaxCpuCount(null));
});
// Compiles the C# integration-test projects against the published tracer home.
Target CompileIntegrationTests => _ => _
.Unlisted()
.After(CompileManagedSrc)
.After(CompileRegressionSamples)
.After(CompileFrameworkReproductions)
.After(PublishIisSamples)
.Requires(() => TracerHomeDirectory != null)
.Executes(() =>
{
DotNetMSBuild(s => s
.SetTargetPath(MsBuildProject)
.DisableRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(Platform)
.SetProperty("ManagedProfilerOutputDirectory", TracerHomeDirectory)
.SetTargets("BuildCsharpIntegrationTests")
.SetMaxCpuCount(null));
});
// Builds the sample applications used by integration tests, excluding
// dependency-libs and the OracleMDA samples.
Target CompileSamples => _ => _
.Unlisted()
.After(CompileDependencyLibs)
.After(CreatePlatformlessSymlinks)
.After(CompileFrameworkReproductions)
.Requires(() => TracerHomeDirectory != null)
.Executes(() =>
{
// This does some "unnecessary" rebuilding and restoring
var include = RootDirectory.GlobFiles("test/test-applications/integrations/**/*.csproj");
var exclude = RootDirectory.GlobFiles("test/test-applications/integrations/dependency-libs/**/*.csproj");
var projects = include.Where(projectPath =>
projectPath switch
{
_ when exclude.Contains(projectPath) => false,
_ when projectPath.ToString().Contains("Samples.OracleMDA") => false,
_ => true,
}
);
DotNetBuild(config => config
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(Platform)
.EnableNoDependencies()
.SetProperty("BuildInParallel", "false")
.SetProperty("ManagedProfilerOutputDirectory", TracerHomeDirectory)
.SetProperty("ExcludeManagedProfiler", true)
.SetProperty("ExcludeNativeProfiler", true)
.SetProperty("LoadManagedProfilerFromProfilerDirectory", false)
.CombineWith(projects, (s, project) => s
.SetProjectFile(project)));
});
// Publishes the ASP.NET (IIS) sample apps via their folder publish profile.
Target PublishIisSamples => _ => _
.Unlisted()
.After(CompileManagedTestHelpers)
.After(CompileRegressionSamples)
.After(CompileFrameworkReproductions)
.Executes(() =>
{
var aspnetFolder = TestsDirectory / "test-applications" / "aspnet";
var aspnetProjects = aspnetFolder.GlobFiles("**/*.csproj");
var publishProfile = aspnetFolder / "PublishProfiles" / "FolderProfile.pubxml";
MSBuild(x => x
.SetMSBuildPath()
// .DisableRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(Platform)
.SetProperty("DeployOnBuild", true)
.SetProperty("PublishProfile", publishProfile)
.SetMaxCpuCount(null)
.CombineWith(aspnetProjects, (c, project) => c
.SetTargetPath(project))
);
});
// Runs the Windows integration tests: first the parallel-safe test projects
// (4 at a time), then the CLR profiler tests with a Windows-specific filter.
// Logs are always moved to the build data directory, even on failure.
Target RunWindowsIntegrationTests => _ => _
    .Unlisted()
    .After(BuildTracerHome)
    .After(CompileIntegrationTests)
    .After(CompileSamples)
    .After(CompileFrameworkReproductions)
    .After(BuildWindowsIntegrationTests)
    .Requires(() => IsWin)
    .Executes(() =>
    {
        // Start each test project with a clean results directory.
        ParallelIntegrationTests.ForEach(EnsureResultsDirectory);
        ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory);
        try
        {
            DotNetTest(config => config
                .SetDotnetPath(Platform)
                .SetConfiguration(BuildConfiguration)
                .SetTargetPlatform(Platform)
                .EnableNoRestore()
                .EnableNoBuild()
                .When(!string.IsNullOrEmpty(Filter), c => c.SetFilter(Filter))
                .When(CodeCoverage, ConfigureCodeCoverage)
                .CombineWith(ParallelIntegrationTests, (s, project) => s
                    .EnableTrxLogOutput(GetResultsDirectory(project))
                    .SetProjectFile(project)), degreeOfParallelism: 4);

            // TODO: I think we should change this filter to run on Windows by default
            // (RunOnWindows!=False|Category=Smoke)&LoadFromGAC!=True&IIS!=True
            DotNetTest(config => config
                .SetDotnetPath(Platform)
                .SetConfiguration(BuildConfiguration)
                .SetTargetPlatform(Platform)
                .EnableNoRestore()
                .EnableNoBuild()
                .SetFilter(Filter ?? "RunOnWindows=True&LoadFromGAC!=True&IIS!=True")
                .When(CodeCoverage, ConfigureCodeCoverage)
                .CombineWith(ClrProfilerIntegrationTests, (s, project) => s
                    .EnableTrxLogOutput(GetResultsDirectory(project))
                    .SetProjectFile(project)));
        }
        finally
        {
            // Collect tracer logs / memory dumps regardless of test outcome.
            MoveLogsToBuildData();
        }
    });
// Runs the Windows smoke/regression tests (Category=Smoke, excluding tests
// that load the tracer from the GAC) against the CLR profiler test projects.
Target RunWindowsRegressionTests => _ => _
    .Unlisted()
    .After(BuildTracerHome)
    .After(CompileIntegrationTests)
    .After(CompileRegressionSamples)
    .After(CompileFrameworkReproductions)
    .Requires(() => IsWin)
    .Executes(() =>
    {
        ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory);
        try
        {
            DotNetTest(config => config
                .SetDotnetPath(Platform)
                .SetConfiguration(BuildConfiguration)
                .SetTargetPlatform(Platform)
                .EnableNoRestore()
                .EnableNoBuild()
                // An explicit Filter overrides the default smoke-test filter.
                .SetFilter(Filter ?? "Category=Smoke&LoadFromGAC!=True")
                .When(CodeCoverage, ConfigureCodeCoverage)
                .CombineWith(ClrProfilerIntegrationTests, (s, project) => s
                    .EnableTrxLogOutput(GetResultsDirectory(project))
                    .SetProjectFile(project)));
        }
        finally
        {
            // Collect logs even when tests fail.
            MoveLogsToBuildData();
        }
    });
// Runs the IIS-hosted integration tests on Windows. These are the tests that
// load the tracer from the GAC, so the filter is the complement of the one
// used by RunWindowsIntegrationTests.
Target RunWindowsIisIntegrationTests => _ => _
    .After(BuildTracerHome)
    .After(CompileIntegrationTests)
    .After(CompileSamples)
    .After(CompileFrameworkReproductions)
    .After(PublishIisSamples)
    .Executes(() =>
    {
        ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory);
        try
        {
            // Different filter from RunWindowsIntegrationTests
            DotNetTest(config => config
                .SetDotnetPath(Platform)
                .SetConfiguration(BuildConfiguration)
                .SetTargetPlatform(Platform)
                .When(Framework != null, o => o.SetFramework(Framework))
                .EnableNoRestore()
                .EnableNoBuild()
                .SetFilter(Filter ?? "(RunOnWindows=True)&LoadFromGAC=True")
                .When(CodeCoverage, ConfigureCodeCoverage)
                .CombineWith(ClrProfilerIntegrationTests, (s, project) => s
                    .EnableTrxLogOutput(GetResultsDirectory(project))
                    .SetProjectFile(project)));
        }
        finally
        {
            MoveLogsToBuildData();
        }
    });
// Builds and publishes the subset of sample/regression/instrumentation
// projects used for Linux testing, for the single Framework under test.
// Skips projects that cannot run on Linux and projects that are built
// separately by CompileMultiApiPackageVersionSamples.
Target CompileSamplesLinux => _ => _
    .Unlisted()
    .After(CompileManagedSrc)
    .After(CompileRegressionDependencyLibs)
    .After(CompileDependencyLibs)
    .After(CompileManagedTestHelpers)
    .Requires(() => TracerHomeDirectory != null)
    .Requires(() => Framework)
    .Executes(() =>
    {
        // There's nothing specifically linux-y here, it's just that we only build a subset of projects
        // for testing on linux.
        var sampleProjects = RootDirectory.GlobFiles("test/test-applications/integrations/*/*.csproj");
        var regressionProjects = RootDirectory.GlobFiles("test/test-applications/regression/*/*.csproj");
        var instrumentationProjects = RootDirectory.GlobFiles("test/test-applications/instrumentation/*/*.csproj");

        // These samples are currently skipped.
        var projectsToSkip = new[]
        {
            "Samples.Msmq",  // Doesn't run on Linux
            "Samples.Owin.WebApi2", // Doesn't run on Linux
            "Samples.MultiDomainHost.Runner",
            "Samples.RateLimiter", // I think we _should_ run this one (assuming it has tests)
            "Samples.SqlServer.NetFramework20",
            "Samples.TracingWithoutLimits", // I think we _should_ run this one (assuming it has tests)
            "Samples.Wcf",
            "Samples.WebRequest.NetFramework20",
            "AutomapperTest", // I think we _should_ run this one (assuming it has tests)
            "DogStatsD.RaceCondition",
            "EntityFramework6x.MdTokenLookupFailure",
            "LargePayload", // I think we _should_ run this one (assuming it has tests)
            "Log4Net.SerializationException",
            "NLog10LogsInjection.NullReferenceException",
            "Sandbox.ManualTracing",
            "StackExchange.Redis.AssemblyConflict.LegacyProject",
        };

        // These sample projects are built using RestoreAndBuildSamplesForPackageVersions
        // so no point building them now
        // TODO: Load this list dynamically
        var multiApiProjects = new[]
        {
            "Samples.CosmosDb",
            "Samples.MongoDB",
            "Samples.Elasticsearch",
            "Samples.Elasticsearch.V5",
            "Samples.Kafka",
            "Samples.Npgsql",
            "Samples.RabbitMQ",
            "Samples.SqlServer",
            "Samples.Microsoft.Data.SqlClient",
            "Samples.StackExchange.Redis",
            "Samples.ServiceStack.Redis",
            // "Samples.MySql", - the "non package version" is _ALSO_ tested separately
            "Samples.Microsoft.Data.Sqlite",
            "Samples.OracleMDA",
            "Samples.OracleMDA.Core",
            "Samples.XUnitTests",
            "Samples.NUnitTests",
            "Samples.MSTestTests",
        };

        var projectsToBuild = sampleProjects
            .Concat(regressionProjects)
            .Concat(instrumentationProjects)
            .Where(path =>
            {
                var project = Solution.GetProject(path);
                return project?.Name switch
                {
                    // The ASP.NET Core samples are framework-specific: build only the
                    // one matching the Framework currently being tested.
                    "Samples.AspNetCoreMvc21" => Framework == TargetFramework.NETCOREAPP2_1,
                    "Samples.AspNetCoreMvc30" => Framework == TargetFramework.NETCOREAPP3_0,
                    "Samples.AspNetCoreMvc31" => Framework == TargetFramework.NETCOREAPP3_1,
                    var name when projectsToSkip.Contains(name) => false,
                    var name when multiApiProjects.Contains(name) => false,
                    _ => true,
                };
            });

        // do the build and publish separately to avoid dependency issues

        // Always AnyCPU
        DotNetBuild(x => x
            // .EnableNoRestore()
            .EnableNoDependencies()
            .SetConfiguration(BuildConfiguration)
            .SetFramework(Framework)
            // .SetTargetPlatform(Platform)
            .SetNoWarnDotNetCore3()
            .SetProperty("ExcludeManagedProfiler", "true")
            .SetProperty("ExcludeNativeProfiler", "true")
            .SetProperty("ManagedProfilerOutputDirectory", TracerHomeDirectory)
            .When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true"))
            .When(!string.IsNullOrEmpty(NugetPackageDirectory), o => o.SetPackageDirectory(NugetPackageDirectory))
            .CombineWith(projectsToBuild, (c, project) => c
                .SetProjectFile(project)));

        // Always AnyCPU
        DotNetPublish(x => x
            .EnableNoRestore()
            .EnableNoBuild()
            .EnableNoDependencies()
            .SetConfiguration(BuildConfiguration)
            .SetFramework(Framework)
            // .SetTargetPlatform(Platform)
            .SetNoWarnDotNetCore3()
            .SetProperty("ManagedProfilerOutputDirectory", TracerHomeDirectory)
            .When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true"))
            .When(!string.IsNullOrEmpty(NugetPackageDirectory), o => o.SetPackageDirectory(NugetPackageDirectory))
            .CombineWith(projectsToBuild, (c, project) => c
                .SetProject(project)));
    });
// Restores and builds the samples that are tested against multiple NuGet
// package versions, via the RestoreSamplesForPackageVersionsOnly /
// RestoreAndBuildSamplesForPackageVersionsOnly MSBuild targets.
Target CompileMultiApiPackageVersionSamples => _ => _
    .Unlisted()
    .After(CompileManagedSrc)
    .After(CompileRegressionDependencyLibs)
    .After(CompileDependencyLibs)
    .After(CompileManagedTestHelpers)
    .After(CompileSamplesLinux)
    .Requires(() => TracerHomeDirectory != null)
    .Requires(() => Framework)
    .Executes(() =>
    {
        // Build and restore for all versions
        // Annoyingly this rebuilds everything again and again.
        var targets = new[] { "RestoreSamplesForPackageVersionsOnly", "RestoreAndBuildSamplesForPackageVersionsOnly" };

        // /nowarn:NU1701 - Package 'x' was restored using '.NETFramework,Version=v4.6.1' instead of the project target framework '.NETCoreApp,Version=v2.1'.
        DotNetMSBuild(x => x
            .SetTargetPath(MsBuildProject)
            .SetConfiguration(BuildConfiguration)
            .EnableNoDependencies()
            .SetProperty("TargetFramework", Framework.ToString())
            .SetProperty("ManagedProfilerOutputDirectory", TracerHomeDirectory)
            .SetProperty("BuildInParallel", "true")
            .SetProperty("ExcludeManagedProfiler", "true")
            .SetProperty("ExcludeNativeProfiler", "true")
            .SetProcessArgumentConfigurator(arg => arg.Add("/nowarn:NU1701"))
            // NOTE(review): env var is always set here; the property is only added
            // when TestAllPackageVersions is true — presumably both are read by the
            // MSBuild targets. Confirm whether the unconditional env var is intended.
            .AddProcessEnvironmentVariable("TestAllPackageVersions", "true")
            .When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true"))
            .CombineWith(targets, (c, target) => c.SetTargets(target))
        );
    });
// Builds the integration-test projects themselves (AnyCPU) for Linux, then
// copies the tracer home directory (including linux-* native outputs) next to
// the test binaries as "profiler-lib".
Target CompileLinuxIntegrationTests => _ => _
    .Unlisted()
    .After(CompileManagedSrc)
    .After(CompileRegressionDependencyLibs)
    .After(CompileDependencyLibs)
    .After(CompileManagedTestHelpers)
    .After(CompileSamplesLinux)
    .After(CompileMultiApiPackageVersionSamples)
    .Requires(() => TracerHomeDirectory != null)
    .Requires(() => Framework)
    .Executes(() =>
    {
        // Build the actual integration test projects for Any CPU
        var integrationTestProjects = RootDirectory.GlobFiles("test/*.IntegrationTests/*.csproj");
        DotNetBuild(x => x
            // .EnableNoRestore()
            .EnableNoDependencies()
            .SetConfiguration(BuildConfiguration)
            .SetFramework(Framework)
            // .SetTargetPlatform(Platform)
            .SetNoWarnDotNetCore3()
            .When(TestAllPackageVersions, o => o
                .SetProperty("TestAllPackageVersions", "true"))
            .AddProcessEnvironmentVariable("TestAllPackageVersions", "true")
            .AddProcessEnvironmentVariable("ManagedProfilerOutputDirectory", TracerHomeDirectory)
            .When(!string.IsNullOrEmpty(NugetPackageDirectory), o =>
                o.SetPackageDirectory(NugetPackageDirectory))
            .CombineWith(integrationTestProjects, (c, project) => c
                .SetProjectFile(project)));

        // Not sure if/why this is necessary, and we can't just point to the correct output location
        var src = TracerHomeDirectory;
        var testProject = Solution.GetProject(Projects.ClrProfilerIntegrationTests).Directory;
        var dest = testProject / "bin" / BuildConfiguration / Framework / "profiler-lib";
        CopyDirectoryRecursively(src, dest, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite);

        // not sure exactly where this is supposed to go, may need to change the original build
        foreach (var linuxDir in TracerHomeDirectory.GlobDirectories("linux-*"))
        {
            CopyDirectoryRecursively(linuxDir, dest, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite);
        }
    });
// Runs the Linux integration tests: the parallel-safe projects first
// (2 at a time), then the CLR profiler tests separately so their output can
// be tailed. Logs are always collected, even on failure.
Target RunLinuxIntegrationTests => _ => _
    .After(CompileLinuxIntegrationTests)
    .Description("Runs the linux integration tests")
    .Requires(() => Framework)
    .Requires(() => IsLinux)
    .Executes(() =>
    {
        // Start each test project with a clean results directory.
        ParallelIntegrationTests.ForEach(EnsureResultsDirectory);
        ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory);

        // Default filter excludes Linux-unsupported tests, and additionally
        // ARM-unsupported tests on arm64. An explicit Filter always wins.
        var filter = (string.IsNullOrEmpty(Filter), IsArm64) switch
        {
            (true, false) => "Category!=LinuxUnsupported",
            // FIX: the second clause previously lacked its closing parenthesis
            // ("...&(Category!=LinuxUnsupported"), producing an invalid
            // `dotnet test` filter expression on arm64.
            (true, true) => "(Category!=ArmUnsupported)&(Category!=LinuxUnsupported)",
            _ => Filter
        };

        try
        {
            // Run these ones in parallel
            // Always AnyCPU
            DotNetTest(config => config
                .SetConfiguration(BuildConfiguration)
                // .SetTargetPlatform(Platform)
                .EnableNoRestore()
                .EnableNoBuild()
                .SetFramework(Framework)
                .EnableMemoryDumps()
                .SetFilter(filter)
                .When(TestAllPackageVersions, o => o
                    .SetProcessEnvironmentVariable("TestAllPackageVersions", "true"))
                .When(CodeCoverage, ConfigureCodeCoverage)
                .CombineWith(ParallelIntegrationTests, (s, project) => s
                    .EnableTrxLogOutput(GetResultsDirectory(project))
                    .SetProjectFile(project)),
                degreeOfParallelism: 2);

            // Run this one separately so we can tail output
            DotNetTest(config => config
                .SetConfiguration(BuildConfiguration)
                // .SetTargetPlatform(Platform)
                .EnableNoRestore()
                .EnableNoBuild()
                .SetFramework(Framework)
                .EnableMemoryDumps()
                .SetFilter(filter)
                .When(TestAllPackageVersions, o => o
                    .SetProcessEnvironmentVariable("TestAllPackageVersions", "true"))
                .When(CodeCoverage, ConfigureCodeCoverage)
                .CombineWith(ClrProfilerIntegrationTests, (s, project) => s
                    .EnableTrxLogOutput(GetResultsDirectory(project))
                    .SetProjectFile(project))
            );
        }
        finally
        {
            // Collect tracer logs / core dumps regardless of test outcome.
            MoveLogsToBuildData();
        }
    });
// Returns the per-project TRX results directory under the build data folder.
private AbsolutePath GetResultsDirectory(Project proj) => BuildDataDirectory / "results" / proj.Name;
// Creates (or empties) the per-project results directory before a test run.
private void EnsureResultsDirectory(Project proj) => EnsureCleanDirectory(GetResultsDirectory(proj));
// Copies tracer log files and any core dumps from their working locations
// into the build data directory so CI can archive them as artifacts.
private void MoveLogsToBuildData()
{
    if (Directory.Exists(TracerLogDirectory))
    {
        CopyDirectoryRecursively(TracerLogDirectory, BuildDataDirectory / "logs",
            DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite);
    }

    if (Directory.Exists(TempDirectory))
    {
        // Linux core dumps land in the temp directory as coredump*.
        foreach (var dump in GlobFiles(TempDirectory, "coredump*"))
        {
            MoveFileToDirectory(dump, BuildDataDirectory / "dumps", FileExistsPolicy.Overwrite);
        }
    }
}
// Configures coverlet ("XPlat Code Coverage") for a dotnet test invocation:
// cobertura output, strong-name key for instrumenting signed assemblies, and
// include/exclude masks limiting coverage to the tracer assemblies.
private DotNetTestSettings ConfigureCodeCoverage(DotNetTestSettings settings)
{
    var strongNameKeyPath = Solution.Directory / "Datadog.Trace.snk";

    return settings.SetDataCollector("XPlat Code Coverage")
        .SetProcessArgumentConfigurator(
            args =>
                args.Add("--")
                    .Add("RunConfiguration.DisableAppDomain=true") // https://github.com/coverlet-coverage/coverlet/issues/347
                    .Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.SkipAutoProps=true")
                    .Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.Format=cobertura")
                    .Add($"DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.StrongNameKey=\"{strongNameKeyPath}\"")
                    .Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.Exclude=[*]Datadog.Trace.Vendors.*,")
                    .Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.Include=\"[Datadog.Trace.ClrProfiler.*]*,[Datadog.Trace]*,[Datadog.Trace.AspNet]*\""));
}
// NUKE hook invoked before each target runs. Optionally logs available disk
// space per drive (useful for diagnosing CI agents running out of space).
protected override void OnTargetStart(string target)
{
    if (PrintDriveSpace)
    {
        foreach (var drive in DriveInfo.GetDrives().Where(d => d.IsReady))
        {
            Logger.Info($"Drive space available on '{drive.Name}': {PrettyPrint(drive.AvailableFreeSpace)} / {PrettyPrint(drive.TotalSize)}");
        }
    }
    base.OnTargetStart(target);

    // Formats a byte count using decimal (power-of-1000) units, up to TB.
    static string PrettyPrint(long bytes)
    {
        var power = Math.Min((int) Math.Log(bytes, 1000), 4);
        var normalised = bytes / Math.Pow(1000, power);

        return power switch
        {
            4 => $"{normalised:F}TB",
            3 => $"{normalised:F}GB",
            2 => $"{normalised:F}MB",
            1 => $"{normalised:F}KB",
            _ => $"{bytes}B",
        };
    }
}
}
| 1 | 21,447 | I assume we have a package reference somewhere that pulls down the nuget package? | DataDog-dd-trace-dotnet | .cs |
@@ -575,12 +575,9 @@ func (s *Server) initEventTracking() {
}
s.sys.inboxPre = subject
// This is for remote updates for connection accounting.
-
- for _, subj := range []string{accConnsEventSubjOld, accConnsEventSubjNew} {
- subject = fmt.Sprintf(subj, "*")
- if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
- s.Errorf("Error setting up internal tracking for %s: %v", subject, err)
- }
+ subject = fmt.Sprintf(accConnsEventSubjOld, "*")
+ if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
+ s.Errorf("Error setting up internal tracking for %s: %v", subject, err)
}
// This will be for responses for account info that we send out.
subject = fmt.Sprintf(connsRespSubj, s.info.ID) | 1 | // Copyright 2018-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nats-server/v2/server/pse"
)
// Subject templates and token counts/indices used by the system events
// subsystem. %s placeholders are filled with account names, server IDs, or
// request types as noted.
const (
	accLookupReqTokens = 6
	accLookupReqSubj   = "$SYS.REQ.ACCOUNT.%s.CLAIMS.LOOKUP"
	accPackReqSubj     = "$SYS.REQ.CLAIMS.PACK"

	connectEventSubj    = "$SYS.ACCOUNT.%s.CONNECT"
	disconnectEventSubj = "$SYS.ACCOUNT.%s.DISCONNECT"
	accReqSubj          = "$SYS.REQ.ACCOUNT.%s.%s"

	// kept for backward compatibility when using http resolver
	// this overlaps with the names for events but you'd have to have the operator private key in order to succeed.
	accUpdateEventSubjOld = "$SYS.ACCOUNT.%s.CLAIMS.UPDATE"
	accUpdateEventSubjNew = "$SYS.REQ.ACCOUNT.%s.CLAIMS.UPDATE"

	connsRespSubj        = "$SYS._INBOX_.%s"
	accConnsEventSubjNew = "$SYS.ACCOUNT.%s.SERVER.CONNS"
	accConnsEventSubjOld = "$SYS.SERVER.ACCOUNT.%s.CONNS" // kept for backward compatibility

	shutdownEventSubj  = "$SYS.SERVER.%s.SHUTDOWN"
	authErrorEventSubj = "$SYS.SERVER.%s.CLIENT.AUTH.ERR"

	serverStatsSubj        = "$SYS.SERVER.%s.STATSZ"
	serverDirectReqSubj    = "$SYS.REQ.SERVER.%s.%s"
	serverPingReqSubj      = "$SYS.REQ.SERVER.PING.%s"
	serverStatsPingReqSubj = "$SYS.REQ.SERVER.PING" // use $SYS.REQ.SERVER.PING.STATSZ instead

	leafNodeConnectEventSubj = "$SYS.ACCOUNT.%s.LEAFNODE.CONNECT"
	remoteLatencyEventSubj   = "$SYS.LATENCY.M2.%s"
	inboxRespSubj            = "$SYS._INBOX.%s.%s"

	// FIXME(dlc) - Should account scope, even with wc for now, but later on
	// we can then shard as needed.
	accNumSubsReqSubj = "$SYS.REQ.ACCOUNT.NSUBS"

	// These are for exported debug services. These are local to this server only.
	accSubsSubj = "$SYS.DEBUG.SUBSCRIBERS"

	// Token counts / indices for parsing the subjects above.
	shutdownEventTokens = 4
	serverSubjectIndex  = 2

	accUpdateTokensNew = 6
	accUpdateTokensOld = 5
	accUpdateAccIdxOld = 2

	accReqTokens   = 5
	accReqAccIndex = 3
)

// Interval between periodic heartbeats of the events subsystem.
// FIXME(dlc) - make configurable.
var eventsHBInterval = 30 * time.Second
// Used to send and receive messages from inside the server.
// internal is used to send and receive messages from inside the server.
// All fields are protected by the server lock unless noted otherwise.
type internal struct {
	account *Account            // system account
	client  *client             // internal client used to publish/subscribe
	seq     uint64              // sequence number stamped on outgoing ServerInfo (updated atomically)
	sid     int                 // next internal subscription id
	servers map[string]*serverUpdate // remote servers we are tracking, keyed by server ID
	sweeper *time.Timer         // timer for orphan-server sweeps
	stmr    *time.Timer         // timer for periodic statsz heartbeats
	replies map[string]msgHandler // reply-subject handlers for internal requests
	sendq   chan *pubMsg        // queue drained by internalSendLoop
	resetCh chan struct{}       // signals internalSendLoop to re-read its config
	wg      sync.WaitGroup
	orphMax time.Duration       // max silence before a remote server is considered orphaned
	chkOrph time.Duration       // orphan-check interval
	statsz  time.Duration       // statsz heartbeat interval
	shash   string              // short server hash used to target this server
	inboxPre string             // inbox subject prefix for this server
}
// ServerStatsMsg is sent periodically with stats updates.
type ServerStatsMsg struct {
	Server ServerInfo  `json:"server"`
	Stats  ServerStats `json:"statsz"`
}
// ConnectEventMsg is sent when a new connection is made that is part of an account.
type ConnectEventMsg struct {
	TypedEvent
	Server ServerInfo `json:"server"`
	Client ClientInfo `json:"client"`
}

// ConnectEventMsgType is the schema type for ConnectEventMsg
const ConnectEventMsgType = "io.nats.server.advisory.v1.client_connect"
// DisconnectEventMsg is sent when a new connection previously defined from a
// ConnectEventMsg is closed.
type DisconnectEventMsg struct {
	TypedEvent
	Server   ServerInfo `json:"server"`
	Client   ClientInfo `json:"client"`
	Sent     DataStats  `json:"sent"`     // data sent over the connection's lifetime
	Received DataStats  `json:"received"` // data received over the connection's lifetime
	Reason   string     `json:"reason"`   // human-readable close reason
}

// DisconnectEventMsgType is the schema type for DisconnectEventMsg
const DisconnectEventMsgType = "io.nats.server.advisory.v1.client_disconnect"
// AccountNumConns is an event that will be sent from a server that is tracking
// a given account when the number of connections changes. It will also HB
// updates in the absence of any changes.
type AccountNumConns struct {
	Server     ServerInfo `json:"server"`
	Account    string     `json:"acc"`
	Conns      int        `json:"conns"`       // client connections on this server
	LeafNodes  int        `json:"leafnodes"`   // leafnode connections on this server
	TotalConns int        `json:"total_conns"` // conns + leafnodes
}
// accNumConnsReq is sent when we are starting to track an account for the first
// time. We will request others send info to us about their local state.
type accNumConnsReq struct {
	Server  ServerInfo `json:"server"`
	Account string     `json:"acc"`
}
// ServerInfo identifies remote servers.
type ServerInfo struct {
	Name      string    `json:"name"`
	Host      string    `json:"host"`
	ID        string    `json:"id"`
	Cluster   string    `json:"cluster,omitempty"`
	Version   string    `json:"ver"`
	Seq       uint64    `json:"seq"`  // per-server monotonic event sequence
	JetStream bool      `json:"jetstream"`
	Time      time.Time `json:"time"` // time the event was emitted
}
// ClientInfo is detailed information about the client forming a connection.
type ClientInfo struct {
	Start   time.Time  `json:"start,omitempty"`
	Host    string     `json:"host,omitempty"`
	ID      uint64     `json:"id"`
	Account string     `json:"acc"`
	User    string     `json:"user,omitempty"`
	Name    string     `json:"name,omitempty"`
	Lang    string     `json:"lang,omitempty"`
	Version string     `json:"ver,omitempty"`
	RTT     string     `json:"rtt,omitempty"`
	Server  string     `json:"server,omitempty"`
	Stop    *time.Time `json:"stop,omitempty"` // set only on disconnect events
}
// ServerStats hold various statistics that we will periodically send out.
type ServerStats struct {
	Start            time.Time      `json:"start"`
	Mem              int64          `json:"mem"`  // RSS in bytes
	Cores            int            `json:"cores"`
	CPU              float64        `json:"cpu"`  // percent CPU usage
	Connections      int            `json:"connections"`
	TotalConnections uint64         `json:"total_connections"`
	ActiveAccounts   int            `json:"active_accounts"`
	NumSubs          uint32         `json:"subscriptions"`
	Sent             DataStats      `json:"sent"`
	Received         DataStats      `json:"received"`
	SlowConsumers    int64          `json:"slow_consumers"`
	Routes           []*RouteStat   `json:"routes,omitempty"`
	Gateways         []*GatewayStat `json:"gateways,omitempty"`
}
// RouteStat holds route statistics.
type RouteStat struct {
	ID       uint64    `json:"rid"`
	Name     string    `json:"name,omitempty"`
	Sent     DataStats `json:"sent"`
	Received DataStats `json:"received"`
	Pending  int       `json:"pending"` // bytes pending in the outbound buffer
}
// GatewayStat holds gateway statistics.
type GatewayStat struct {
	ID         uint64    `json:"gwid"`
	Name       string    `json:"name"`
	Sent       DataStats `json:"sent"`
	Received   DataStats `json:"received"` // aggregated over all inbound connections for this gateway
	NumInbound int       `json:"inbound_connections"`
}
// DataStats reports how may msg and bytes. Applicable for both sent and received.
type DataStats struct {
	Msgs  int64 `json:"msgs"`
	Bytes int64 `json:"bytes"`
}
// pubMsg is used for internally queueing up messages that the server wants to send.
type pubMsg struct {
	acc  *Account    // optional account override; system account when nil
	sub  string      // publish subject
	rply string      // optional reply subject
	si   *ServerInfo // when non-nil, stamped with server identity before sending
	msg  interface{} // payload: string, []byte, or JSON-marshalable value
	last bool        // true only for the final (shutdown) message
}
// serverUpdate is used to track server updates.
type serverUpdate struct {
	seq   uint64    // last sequence number seen from that server
	ltime time.Time // local time of the last update (used for orphan detection)
}
// TypedEvent is a event or advisory sent by the server that has nats type hints
// typically used for events that might be consumed by 3rd party event systems
type TypedEvent struct {
	Type string    `json:"type"` // schema type, e.g. ConnectEventMsgType
	ID   string    `json:"id"`   // unique event id
	Time time.Time `json:"timestamp"`
}
// internalSendLoop will be responsible for serializing all messages that
// a server wants to send. It drains s.sys.sendq until the server quits,
// re-reading its configuration whenever resetCh fires (goto RESET).
func (s *Server) internalSendLoop(wg *sync.WaitGroup) {
	defer wg.Done()

RESET:
	// Snapshot everything we need under the server lock so the send loop
	// itself can run lock-free.
	s.mu.Lock()
	if s.sys == nil || s.sys.sendq == nil {
		s.mu.Unlock()
		return
	}
	c := s.sys.client
	resetCh := s.sys.resetCh
	sysacc := s.sys.account
	sendq := s.sys.sendq
	id := s.info.ID
	host := s.info.Host
	servername := s.info.Name
	seqp := &s.sys.seq
	js := s.js != nil
	cluster := s.info.Cluster
	if s.gateway.enabled {
		cluster = s.getGatewayName()
	}
	s.mu.Unlock()

	// Warn when internal send queue is backed up past 75%
	warnThresh := 3 * internalSendQLen / 4
	warnFreq := time.Second
	last := time.Now().Add(-warnFreq)

	for s.eventsRunning() {
		// Setup information for next message
		if len(sendq) > warnThresh && time.Since(last) >= warnFreq {
			s.Warnf("Internal system send queue > 75%%")
			last = time.Now()
		}

		select {
		case pm := <-sendq:
			// Stamp the server identity and a fresh sequence number when requested.
			if pm.si != nil {
				pm.si.Name = servername
				pm.si.Host = host
				pm.si.Cluster = cluster
				pm.si.ID = id
				pm.si.Seq = atomic.AddUint64(seqp, 1)
				pm.si.Version = VERSION
				pm.si.Time = time.Now()
				pm.si.JetStream = js
			}
			// Serialize the payload: strings/bytes pass through, anything else is JSON.
			var b []byte
			if pm.msg != nil {
				switch v := pm.msg.(type) {
				case string:
					b = []byte(v)
				case []byte:
					b = v
				default:
					b, _ = json.MarshalIndent(pm.msg, _EMPTY_, "  ")
				}
			}
			c.mu.Lock()
			// We can have an override for account here.
			if pm.acc != nil {
				c.acc = pm.acc
			} else {
				c.acc = sysacc
			}
			// Prep internal structures needed to send message.
			c.pa.subject = []byte(pm.sub)
			c.pa.size = len(b)
			c.pa.szb = []byte(strconv.FormatInt(int64(len(b)), 10))
			c.pa.reply = []byte(pm.rply)
			trace := c.trace
			c.mu.Unlock()

			// Add in NL
			b = append(b, _CRLF_...)

			if trace {
				c.traceInOp(fmt.Sprintf("PUB %s %s %d",
					c.pa.subject, c.pa.reply, c.pa.size), nil)
				c.traceMsg(b)
			}

			c.processInboundClientMsg(b)

			// See if we are doing graceful shutdown.
			if !pm.last {
				c.flushClients(0) // Never spend time in place.
			} else {
				// For the Shutdown event, we need to send in place otherwise
				// there is a chance that the process will exit before the
				// writeLoop has a chance to send it.
				c.flushClients(time.Second)
				return
			}
		case <-resetCh:
			goto RESET
		case <-s.quitCh:
			return
		}
	}
}
// sendShutdownEvent will send a shutdown message. It nils out the send queue
// and reply handlers first so no further internal messages can be queued,
// then enqueues the shutdown event marked as the final message (last=true),
// which makes internalSendLoop flush it in place and exit.
func (s *Server) sendShutdownEvent() {
	s.mu.Lock()
	if s.sys == nil || s.sys.sendq == nil {
		s.mu.Unlock()
		return
	}
	subj := fmt.Sprintf(shutdownEventSubj, s.info.ID)
	sendq := s.sys.sendq
	// Stop any more messages from queueing up.
	s.sys.sendq = nil
	// Unhook all msgHandlers. Normal client cleanup will deal with subs, etc.
	s.sys.replies = nil
	s.mu.Unlock()
	// Send to the internal queue and mark as last.
	sendq <- &pubMsg{nil, subj, _EMPTY_, nil, nil, true}
}
// sendInternalAccountMsg queues an internal message to be published on the
// given subject with an account override. Returns ErrNoSysAccount when the
// events system is not available.
func (s *Server) sendInternalAccountMsg(a *Account, subject string, msg interface{}) error {
	// Grab the queue under the lock, but never send while holding it.
	s.mu.Lock()
	var queue chan *pubMsg
	if s.sys != nil {
		queue = s.sys.sendq
	}
	s.mu.Unlock()

	if queue == nil {
		return ErrNoSysAccount
	}
	queue <- &pubMsg{a, subject, "", nil, msg, false}
	return nil
}
// sendInternalMsgLocked queues up a message to be sent.
// The server lock must NOT be held by the caller; it is acquired here and
// released again on return (sendInternalMsg itself drops it while enqueueing).
func (s *Server) sendInternalMsgLocked(sub, rply string, si *ServerInfo, msg interface{}) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.sendInternalMsg(sub, rply, si, msg)
}
// sendInternalMsg will queue up a message to be sent.
// Assumes lock is held on entry. NOTE: the lock is temporarily released while
// placing the message on the channel (a blocking send while holding s.mu
// could deadlock), and re-acquired before returning so the caller's locking
// invariant is preserved.
func (s *Server) sendInternalMsg(sub, rply string, si *ServerInfo, msg interface{}) {
	if s.sys == nil || s.sys.sendq == nil {
		return
	}
	sendq := s.sys.sendq
	// Don't hold lock while placing on the channel.
	s.mu.Unlock()
	sendq <- &pubMsg{nil, sub, rply, si, msg, false}
	s.mu.Lock()
}
// eventsRunning reports whether the server is running and the events system
// is enabled. Acquires the server lock; callers must not hold it.
func (s *Server) eventsRunning() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.running && s.eventsEnabled()
}
// EventsEnabled will report if the server has internal events enabled via
// a defined system account. Safe for concurrent use.
func (s *Server) EventsEnabled() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.eventsEnabled()
}
// eventsEnabled will report if events are enabled, i.e. the internal client
// and system account are both configured.
// Lock should be held.
func (s *Server) eventsEnabled() bool {
	return s.sys != nil && s.sys.client != nil && s.sys.account != nil
}
// TrackedRemoteServers returns how many remote servers we are tracking
// from a system events perspective. Returns -1 when the server is not
// running or the events system is not enabled.
func (s *Server) TrackedRemoteServers() int {
	s.mu.Lock()
	// FIX: the early-return path previously returned while still holding
	// s.mu, deadlocking any subsequent lock acquisition. Use defer so the
	// lock is released on every path.
	defer s.mu.Unlock()
	if !s.running || !s.eventsEnabled() {
		return -1
	}
	return len(s.sys.servers)
}
// checkRemoteServers checks for orphan servers who may have gone away without
// notification: any tracked server silent for longer than orphMax is treated
// as if it had shut down. Re-arms the sweeper timer afterwards.
// This should be wrapChk() to setup common locking.
func (s *Server) checkRemoteServers() {
	now := time.Now()
	for sid, su := range s.sys.servers {
		if now.Sub(su.ltime) > s.sys.orphMax {
			s.Debugf("Detected orphan remote server: %q", sid)
			// Simulate it going away.
			s.processRemoteServerShutdown(sid)
			delete(s.sys.servers, sid)
		}
	}
	if s.sys.sweeper != nil {
		s.sys.sweeper.Reset(s.sys.chkOrph)
	}
}
// updateServerUsage fills in resident memory, CPU percentage and core count
// on the given stats structure.
func updateServerUsage(v *ServerStats) {
	var (
		rss, vss int64
		pcpu     float64
	)
	// vss is collected by ProcUsage but not reported in ServerStats.
	pse.ProcUsage(&pcpu, &rss, &vss)
	v.Mem, v.CPU, v.Cores = rss, pcpu, numCores
}
// routeStat generates a route stat for our statz update.
// Returns nil for a nil route. Counters are read atomically while holding
// the route's client lock (pending bytes and remote name are lock-protected).
func routeStat(r *client) *RouteStat {
	if r == nil {
		return nil
	}
	r.mu.Lock()
	rs := &RouteStat{
		ID: r.cid,
		Sent: DataStats{
			Msgs:  atomic.LoadInt64(&r.outMsgs),
			Bytes: atomic.LoadInt64(&r.outBytes),
		},
		Received: DataStats{
			Msgs:  atomic.LoadInt64(&r.inMsgs),
			Bytes: atomic.LoadInt64(&r.inBytes),
		},
		Pending: int(r.out.pb),
	}
	if r.route != nil {
		rs.Name = r.route.remoteName
	}
	r.mu.Unlock()
	return rs
}
// sendStatsz is the actual send method for statz updates: it gathers server,
// route and gateway statistics and queues them on the given subject.
// Lock should be held.
func (s *Server) sendStatsz(subj string) {
	m := ServerStatsMsg{}
	updateServerUsage(&m.Stats)
	m.Stats.Start = s.start
	m.Stats.Connections = len(s.clients)
	m.Stats.TotalConnections = s.totalClients
	m.Stats.ActiveAccounts = int(atomic.LoadInt32(&s.activeAccounts))
	m.Stats.Received.Msgs = atomic.LoadInt64(&s.inMsgs)
	m.Stats.Received.Bytes = atomic.LoadInt64(&s.inBytes)
	m.Stats.Sent.Msgs = atomic.LoadInt64(&s.outMsgs)
	m.Stats.Sent.Bytes = atomic.LoadInt64(&s.outBytes)
	m.Stats.SlowConsumers = atomic.LoadInt64(&s.slowConsumers)
	m.Stats.NumSubs = s.numSubscriptions()
	for _, r := range s.routes {
		m.Stats.Routes = append(m.Stats.Routes, routeStat(r))
	}
	if s.gateway.enabled {
		gw := s.gateway
		gw.RLock()
		for name, c := range gw.out {
			gs := &GatewayStat{Name: name}
			c.mu.Lock()
			gs.ID = c.cid
			gs.Sent = DataStats{
				Msgs:  atomic.LoadInt64(&c.outMsgs),
				Bytes: atomic.LoadInt64(&c.outBytes),
			}
			c.mu.Unlock()
			// Gather matching inbound connections: a gateway can have multiple
			// inbound connections for the same remote, so aggregate them.
			gs.Received = DataStats{}
			for _, c := range gw.in {
				c.mu.Lock()
				if c.gw.name == name {
					gs.Received.Msgs += atomic.LoadInt64(&c.inMsgs)
					gs.Received.Bytes += atomic.LoadInt64(&c.inBytes)
					gs.NumInbound++
				}
				c.mu.Unlock()
			}
			m.Stats.Gateways = append(m.Stats.Gateways, gs)
		}
		gw.RUnlock()
	}
	// sendInternalMsg will stamp &m.Server with this server's identity.
	s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
}
// heartbeatStatsz sends out our statsz update and re-arms the heartbeat timer.
// This should be wrapChk() to setup common locking.
func (s *Server) heartbeatStatsz() {
	if s.sys.stmr != nil {
		s.sys.stmr.Reset(s.sys.statsz)
	}
	s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID))
}
// startStatszTimer arms the periodic statsz heartbeat.
// This should be wrapChk() to setup common locking.
func (s *Server) startStatszTimer() {
	s.sys.stmr = time.AfterFunc(s.sys.statsz, s.wrapChk(s.heartbeatStatsz))
}
// startRemoteServerSweepTimer starts a timer that will fire periodically and
// check for orphaned servers.
// This should be wrapChk() to setup common locking.
func (s *Server) startRemoteServerSweepTimer() {
	s.sys.sweeper = time.AfterFunc(s.sys.chkOrph, s.wrapChk(s.checkRemoteServers))
}
// sysHashLen is the length of our system hash used for server targeted
// messages (a base64url-encoded SHA-256 prefix of the server ID).
const sysHashLen = 6
// This will setup our system wide tracking subs.
// For now we will setup one wildcard subscription to
// monitor all accounts for changes in number of connections.
// We can make this on a per account tracking basis if needed.
// Tradeoff is subscription and interest graph events vs connect and
// disconnect events, etc.
// It is called once the system account is configured. Each subscription
// failure is logged but does not abort setup of the remaining subscriptions.
func (s *Server) initEventTracking() {
	if !s.EventsEnabled() {
		return
	}
	// Create a system hash which we use for other servers to target us specifically.
	sha := sha256.New()
	sha.Write([]byte(s.info.ID))
	s.sys.shash = base64.RawURLEncoding.EncodeToString(sha.Sum(nil))[:sysHashLen]
	// This will be for all inbox responses.
	subject := fmt.Sprintf(inboxRespSubj, s.sys.shash, "*")
	if _, err := s.sysSubscribe(subject, s.inboxReply); err != nil {
		s.Errorf("Error setting up internal tracking: %v", err)
	}
	s.sys.inboxPre = subject
	// This is for remote updates for connection accounting.
	for _, subj := range []string{accConnsEventSubjOld, accConnsEventSubjNew} {
		subject = fmt.Sprintf(subj, "*")
		if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
			s.Errorf("Error setting up internal tracking for %s: %v", subject, err)
		}
	}
	// This will be for responses for account info that we send out.
	subject = fmt.Sprintf(connsRespSubj, s.info.ID)
	if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
		s.Errorf("Error setting up internal tracking: %v", err)
	}
	// Listen for broad requests to respond with number of subscriptions for a given subject.
	if _, err := s.sysSubscribe(accNumSubsReqSubj, s.nsubsRequest); err != nil {
		s.Errorf("Error setting up internal tracking: %v", err)
	}
	// Listen for all server shutdowns.
	subject = fmt.Sprintf(shutdownEventSubj, "*")
	if _, err := s.sysSubscribe(subject, s.remoteServerShutdown); err != nil {
		s.Errorf("Error setting up internal tracking: %v", err)
	}
	// Listen for account claims updates.
	// Skip if the account resolver is already tracking updates itself.
	subscribeToUpdate := true
	if s.accResolver != nil {
		subscribeToUpdate = !s.accResolver.IsTrackingUpdate()
	}
	if subscribeToUpdate {
		for _, sub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} {
			if _, err := s.sysSubscribe(fmt.Sprintf(sub, "*"), s.accountClaimUpdate); err != nil {
				s.Errorf("Error setting up internal tracking: %v", err)
			}
		}
	}
	// Listen for ping messages that will be sent to all servers for statsz.
	// This subscription is kept for backwards compatibility. Got replaced by ...PING.STATZ from below
	if _, err := s.sysSubscribe(serverStatsPingReqSubj, s.statszReq); err != nil {
		s.Errorf("Error setting up internal tracking: %v", err)
	}
	// Per-server monitoring endpoints, keyed by endpoint name. Each is
	// subscribed both on a direct (server-ID-scoped) subject and on a
	// broadcast ping subject below.
	monSrvc := map[string]msgHandler{
		"STATSZ": s.statszReq,
		"VARZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &VarzEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Varz(&optz.VarzOptions) })
		},
		"SUBSZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &SubszEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Subsz(&optz.SubszOptions) })
		},
		"CONNZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &ConnzEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Connz(&optz.ConnzOptions) })
		},
		"ROUTEZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &RoutezEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Routez(&optz.RoutezOptions) })
		},
		"GATEWAYZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &GatewayzEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Gatewayz(&optz.GatewayzOptions) })
		},
		"LEAFZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &LeafzEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Leafz(&optz.LeafzOptions) })
		},
		"ACCOUNTZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &AccountzEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Accountz(&optz.AccountzOptions) })
		},
	}
	for name, req := range monSrvc {
		subject = fmt.Sprintf(serverDirectReqSubj, s.info.ID, name)
		if _, err := s.sysSubscribe(subject, req); err != nil {
			s.Errorf("Error setting up internal tracking: %v", err)
		}
		subject = fmt.Sprintf(serverPingReqSubj, name)
		if _, err := s.sysSubscribe(subject, req); err != nil {
			s.Errorf("Error setting up internal tracking: %v", err)
		}
	}
	// extractAccount pulls the account token out of an account-scoped
	// request subject, validating the token count first.
	extractAccount := func(subject string) (string, error) {
		if tk := strings.Split(subject, tsep); len(tk) != accReqTokens {
			return "", fmt.Errorf("subject %q is malformed", subject)
		} else {
			return tk[accReqAccIndex], nil
		}
	}
	// Per-account monitoring endpoints, subscribed with an account wildcard.
	monAccSrvc := map[string]msgHandler{
		"SUBSZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &SubszEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
				if acc, err := extractAccount(subject); err != nil {
					return nil, err
				} else {
					optz.SubszOptions.Subscriptions = true
					optz.SubszOptions.Account = acc
					return s.Subsz(&optz.SubszOptions)
				}
			})
		},
		"CONNZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &ConnzEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
				if acc, err := extractAccount(subject); err != nil {
					return nil, err
				} else {
					optz.ConnzOptions.Account = acc
					return s.Connz(&optz.ConnzOptions)
				}
			})
		},
		"LEAFZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &LeafzEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
				if acc, err := extractAccount(subject); err != nil {
					return nil, err
				} else {
					optz.LeafzOptions.Account = acc
					return s.Leafz(&optz.LeafzOptions)
				}
			})
		},
		"INFO": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
			optz := &AccInfoEventOptions{}
			s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
				if acc, err := extractAccount(subject); err != nil {
					return nil, err
				} else {
					return s.accountInfo(acc)
				}
			})
		},
		"CONNS": s.connsRequest,
	}
	for name, req := range monAccSrvc {
		if _, err := s.sysSubscribe(fmt.Sprintf(accReqSubj, "*", name), req); err != nil {
			s.Errorf("Error setting up internal tracking: %v", err)
		}
	}
	// Listen for updates when leaf nodes connect for a given account. This will
	// force any gateway connections to move to `modeInterestOnly`
	subject = fmt.Sprintf(leafNodeConnectEventSubj, "*")
	if _, err := s.sysSubscribe(subject, s.leafNodeConnected); err != nil {
		s.Errorf("Error setting up internal tracking: %v", err)
	}
	// For tracking remote latency measurements.
	subject = fmt.Sprintf(remoteLatencyEventSubj, s.sys.shash)
	if _, err := s.sysSubscribe(subject, s.remoteLatencyUpdate); err != nil {
		s.Errorf("Error setting up internal latency tracking: %v", err)
	}
	// This is for simple debugging of number of subscribers that exist in the system.
	if _, err := s.sysSubscribeInternal(accSubsSubj, s.debugSubscribers); err != nil {
		s.Errorf("Error setting up internal debug service for subscribers: %v", err)
	}
}
// addSystemAccountExports registers the service exports the system account
// needs (currently only the subscriber-count debug service). A failure to
// export is logged but not fatal.
func (s *Server) addSystemAccountExports(sacc *Account) {
	if !s.EventsEnabled() {
		return
	}
	err := sacc.AddServiceExport(accSubsSubj, nil)
	if err != nil {
		s.Errorf("Error adding system service export for %q: %v", accSubsSubj, err)
	}
}
// accountClaimUpdate will receive claim updates for accounts.
// The account public key is taken from the subject (old or new token layout),
// the JWT from the message body; the claim subject must match the key, and the
// account must already be loaded locally. A status reply is sent via
// respondToUpdate in every handled case.
func (s *Server) accountClaimUpdate(sub *subscription, _ *client, subject, resp string, msg []byte) {
	if !s.EventsEnabled() {
		return
	}
	var pubKey string
	toks := strings.Split(subject, tsep)
	switch len(toks) {
	case accUpdateTokensNew:
		pubKey = toks[accReqAccIndex]
	case accUpdateTokensOld:
		pubKey = toks[accUpdateAccIdxOld]
	default:
		s.Debugf("Received account claims update on bad subject %q", subject)
		return
	}
	claim, err := jwt.DecodeAccountClaims(string(msg))
	if err != nil {
		respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
		return
	}
	if claim.Subject != pubKey {
		respondToUpdate(s, resp, pubKey, "jwt update resulted in error", errors.New("subject does not match jwt content"))
		return
	}
	v, ok := s.accounts.Load(pubKey)
	if !ok {
		// Not a locally registered account; nothing to update.
		respondToUpdate(s, resp, pubKey, "jwt update skipped", nil)
		return
	}
	if err := s.updateAccountWithClaimJWT(v.(*Account), string(msg)); err != nil {
		respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
		return
	}
	respondToUpdate(s, resp, pubKey, "jwt updated", nil)
}
// processRemoteServerShutdown will update any affected accounts.
// It walks every registered account and drops the remote count for the
// given server id. Lock assume held.
func (s *Server) processRemoteServerShutdown(sid string) {
	s.accounts.Range(func(_, v interface{}) bool {
		acc := v.(*Account)
		acc.removeRemoteServer(sid)
		return true
	})
}
// remoteServerShutdown is called when we get an event from another server
// shutting down. The server id comes from the subject; we only process the
// shutdown if we were actually tracking that server.
func (s *Server) remoteServerShutdown(sub *subscription, _ *client, subject, reply string, msg []byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.eventsEnabled() {
		return
	}
	toks := strings.Split(subject, tsep)
	if len(toks) < shutdownEventTokens {
		s.Debugf("Received remote server shutdown on bad subject %q", subject)
		return
	}
	sid := toks[serverSubjectIndex]
	if _, known := s.sys.servers[sid]; known {
		s.processRemoteServerShutdown(sid)
	}
}
// updateRemoteServer is called when we have an update from a remote server.
// This allows us to track remote servers, respond to shutdown messages properly,
// make sure that messages are ordered, and allow us to prune dead servers.
// Lock should be held upon entry.
func (s *Server) updateRemoteServer(ms *ServerInfo) {
	su, known := s.sys.servers[ms.ID]
	if !known {
		// First time we hear from this server.
		s.sys.servers[ms.ID] = &serverUpdate{ms.Seq, time.Now()}
		s.processNewServer(ms)
		return
	}
	// Sequence numbers should always be going up.
	if ms.Seq <= su.seq {
		s.Errorf("Received out of order remote server update from: %q", ms.ID)
		return
	}
	su.seq = ms.Seq
	su.ltime = time.Now()
}
// processNewServer will hold any logic we want to use when we discover a new server.
// Lock should be held upon entry.
// NOTE: ms is currently unused; it is kept in the signature for future
// per-server handling.
func (s *Server) processNewServer(ms *ServerInfo) {
	// Right now we only check if we have leafnode servers and if so send another
	// connect update to make sure they switch this account to interest only mode.
	s.ensureGWsInterestOnlyForLeafNodes()
}
// If GW is enabled on this server and there are any leaf node connections,
// this function will send a LeafNode connect system event to the super cluster
// to ensure that the GWs are in interest-only mode for this account.
// Each account is notified at most once per call.
// Lock should be held upon entry.
// TODO(dlc) - this will cause this account to be loaded on all servers. Need a better
// way with GW2.
func (s *Server) ensureGWsInterestOnlyForLeafNodes() {
	if !s.gateway.enabled || len(s.leafs) == 0 {
		return
	}
	notified := make(map[*Account]bool, len(s.leafs))
	for _, c := range s.leafs {
		if notified[c.acc] {
			continue
		}
		s.sendLeafNodeConnectMsg(c.acc.Name)
		notified[c.acc] = true
	}
}
// shutdownEventing will clean up all eventing state.
// Ordering matters here: timers are cleared first, then a shutdown event is
// queued and we block until the internal send loop drains and exits, and only
// then is per-account eventing state cleared and s.sys released.
func (s *Server) shutdownEventing() {
	if !s.eventsRunning() {
		return
	}
	s.mu.Lock()
	clearTimer(&s.sys.sweeper)
	clearTimer(&s.sys.stmr)
	s.mu.Unlock()
	// We will queue up a shutdown event and wait for the
	// internal send loop to exit.
	s.sendShutdownEvent()
	s.sys.wg.Wait()
	close(s.sys.resetCh)
	s.mu.Lock()
	defer s.mu.Unlock()
	// Whip through all accounts.
	s.accounts.Range(func(k, v interface{}) bool {
		v.(*Account).clearEventing()
		return true
	})
	// Turn everything off here.
	s.sys = nil
}
// connsRequest handles a request for our local connection count for a given
// account. The account name appears both in the subject and in the JSON
// payload and the two must agree. We reply (via sendAccConnsUpdate) only if
// the account is registered locally and has at least one local connection.
func (s *Server) connsRequest(sub *subscription, _ *client, subject, reply string, msg []byte) {
	if !s.eventsRunning() {
		return
	}
	tk := strings.Split(subject, tsep)
	if len(tk) != accReqTokens {
		s.sys.client.Errorf("Bad subject account connections request message")
		return
	}
	a := tk[accReqAccIndex]
	m := accNumConnsReq{Account: a}
	if err := json.Unmarshal(msg, &m); err != nil {
		s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err)
		return
	}
	if m.Account != a {
		s.sys.client.Errorf("Error unmarshalled account does not match subject")
		return
	}
	// Here we really only want to lookup the account if its local. We do not want to fetch this
	// account if we have no interest in it.
	var acc *Account
	if v, ok := s.accounts.Load(m.Account); ok {
		acc = v.(*Account)
	}
	if acc == nil {
		return
	}
	// We know this is a local connection.
	if nlc := acc.NumLocalConnections(); nlc > 0 {
		s.mu.Lock()
		s.sendAccConnsUpdate(acc, reply)
		s.mu.Unlock()
	}
}
// leafNodeConnected is an event we will receive when a leaf node for a given
// account connects. If gateways are enabled and the account resolves locally,
// the account is switched to interest-only mode across the gateways.
func (s *Server) leafNodeConnected(sub *subscription, _ *client, subject, reply string, msg []byte) {
	var req accNumConnsReq
	if err := json.Unmarshal(msg, &req); err != nil {
		s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err)
		return
	}
	s.mu.Lock()
	ignore := req.Account == "" || !s.eventsEnabled() || !s.gateway.enabled
	s.mu.Unlock()
	if ignore {
		return
	}
	acc, _ := s.lookupAccount(req.Account)
	if acc != nil {
		s.switchAccountToInterestMode(acc.Name)
	}
}
// EventFilterOptions are the common filter options for system requests
// STATSZ VARZ SUBSZ CONNZ ROUTEZ GATEWAYZ LEAFZ. Matching is by substring
// (see filterRequest); empty fields match everything.
type EventFilterOptions struct {
	Name    string `json:"server_name,omitempty"` // filter by server name
	Cluster string `json:"cluster,omitempty"`     // filter by cluster name
	Host    string `json:"host,omitempty"`        // filter by host name
}
// StatszEventOptions are options passed to Statsz
type StatszEventOptions struct {
	// No actual options yet
	EventFilterOptions
}

// AccInfoEventOptions are options for the per-account INFO request.
type AccInfoEventOptions struct {
	// No actual options yet
	EventFilterOptions
}

// In the context of system events, ConnzEventOptions are options passed to Connz
type ConnzEventOptions struct {
	ConnzOptions
	EventFilterOptions
}

// In the context of system events, RoutezEventOptions are options passed to Routez
type RoutezEventOptions struct {
	RoutezOptions
	EventFilterOptions
}

// In the context of system events, SubszEventOptions are options passed to Subsz
type SubszEventOptions struct {
	SubszOptions
	EventFilterOptions
}

// In the context of system events, VarzEventOptions are options passed to Varz
type VarzEventOptions struct {
	VarzOptions
	EventFilterOptions
}

// In the context of system events, GatewayzEventOptions are options passed to Gatewayz
type GatewayzEventOptions struct {
	GatewayzOptions
	EventFilterOptions
}

// In the context of system events, LeafzEventOptions are options passed to Leafz
type LeafzEventOptions struct {
	LeafzOptions
	EventFilterOptions
}

// In the context of system events, AccountzEventOptions are options passed to Accountz
type AccountzEventOptions struct {
	AccountzOptions
	EventFilterOptions
}
// filterRequest returns true if the request does NOT apply to this server and
// can be ignored. Name, host, and cluster are matched by substring.
// DO NOT hold the server lock when calling this: it acquires s.mu itself for
// the cluster name lookup.
func (s *Server) filterRequest(fOpts *EventFilterOptions) bool {
	if fOpts.Name != "" && !strings.Contains(s.info.Name, fOpts.Name) {
		return true
	}
	if fOpts.Host != "" && !strings.Contains(s.info.Host, fOpts.Host) {
		return true
	}
	if fOpts.Cluster != "" {
		s.mu.Lock()
		cluster := s.info.Cluster
		s.mu.Unlock()
		if !strings.Contains(cluster, fOpts.Cluster) {
			return true
		}
	}
	return false
}
// statszReq is a request for us to respond with current statsz.
// An empty payload means no filtering. A malformed payload gets an
// HTTP-style error reply; a payload whose filters exclude this server
// gets no reply at all.
func (s *Server) statszReq(sub *subscription, _ *client, subject, reply string, msg []byte) {
	if !s.EventsEnabled() || reply == _EMPTY_ {
		return
	}
	opts := StatszEventOptions{}
	if len(msg) != 0 {
		if err := json.Unmarshal(msg, &opts); err != nil {
			server := &ServerInfo{}
			response := map[string]interface{}{"server": server}
			response["error"] = map[string]interface{}{
				"code":        http.StatusBadRequest,
				"description": err.Error(),
			}
			s.sendInternalMsgLocked(reply, _EMPTY_, server, response)
			return
		} else if ignore := s.filterRequest(&opts.EventFilterOptions); ignore {
			return
		}
	}
	s.mu.Lock()
	s.sendStatsz(reply)
	s.mu.Unlock()
}
// zReq services a monitoring endpoint request (VARZ, CONNZ, ...). It decodes
// the optional JSON payload into optz, applies the common event filters
// (silently dropping requests that do not target this server), invokes respf
// for the data, and replies with the result or an HTTP-style error.
func (s *Server) zReq(reply string, msg []byte, fOpts *EventFilterOptions, optz interface{}, respf func() (interface{}, error)) {
	if !s.EventsEnabled() || reply == _EMPTY_ {
		return
	}
	server := &ServerInfo{}
	response := map[string]interface{}{"server": server}
	var err error
	status := 0
	if len(msg) != 0 {
		if err = json.Unmarshal(msg, optz); err != nil {
			status = http.StatusBadRequest // status is only included on error, so record how far execution got
		} else if s.filterRequest(fOpts) {
			return
		}
	}
	if err == nil {
		// If respf itself fails, report it as an internal error.
		response["data"], err = respf()
		status = http.StatusInternalServerError
	}
	if err != nil {
		response["error"] = map[string]interface{}{
			"code":        status,
			"description": err.Error(),
		}
	}
	s.sendInternalMsgLocked(reply, _EMPTY_, server, response)
}
// remoteConnsUpdate gets called when we receive a remote update from another
// server about an account's connection counts. We only process updates for
// accounts we already have loaded locally, and any clients that now exceed
// the account's connection limit are closed outside of the server lock.
func (s *Server) remoteConnsUpdate(sub *subscription, _ *client, subject, reply string, msg []byte) {
	if !s.eventsRunning() {
		return
	}
	m := AccountNumConns{}
	if err := json.Unmarshal(msg, &m); err != nil {
		s.sys.client.Errorf("Error unmarshalling account connection event message: %v", err)
		return
	}
	// See if we have the account registered, if not drop it.
	// Make sure this does not force us to load this account here.
	var acc *Account
	if v, ok := s.accounts.Load(m.Account); ok {
		acc = v.(*Account)
	}
	// Silently ignore these if we do not have local interest in the account.
	if acc == nil {
		return
	}
	s.mu.Lock()
	// check again here if we have been shutdown.
	if !s.running || !s.eventsEnabled() {
		s.mu.Unlock()
		return
	}
	// Double check that this is not us, should never happen, so error if it does.
	if m.Server.ID == s.info.ID {
		s.sys.client.Errorf("Processing our own account connection event message: ignored")
		s.mu.Unlock()
		return
	}
	// If we are here we have interest in tracking this account. Update our accounting.
	clients := acc.updateRemoteServer(&m)
	s.updateRemoteServer(&m.Server)
	s.mu.Unlock()
	// Need to close clients outside of server lock
	for _, c := range clients {
		c.maxAccountConnExceeded()
	}
}
// enableAccountTracking sets up tracking for this account by soliciting
// connection counts from the rest of the system. This allows us to track
// global account activity.
// Lock should be held on entry.
func (s *Server) enableAccountTracking(a *Account) {
	if a == nil || !s.eventsEnabled() {
		return
	}
	// TODO(ik): Generate payload although message may not be sent.
	// May need to ensure we do so only if there is a known interest.
	// This can get complicated with gateways.
	req := accNumConnsReq{Account: a.Name}
	subject := fmt.Sprintf(accReqSubj, a.Name, "CONNS")
	replyTo := fmt.Sprintf(connsRespSubj, s.info.ID)
	s.sendInternalMsg(subject, replyTo, &req.Server, &req)
}
// sendLeafNodeConnect publishes the leafnode connect event for the account
// and then switches the account to interest-only mode across gateways.
// Event on leaf node connect.
// Lock should NOT be held on entry (it is acquired and released here; the
// interest-mode switch happens after the lock is dropped).
func (s *Server) sendLeafNodeConnect(a *Account) {
	s.mu.Lock()
	// If we are not in operator mode, or do not have any gateways defined, this should also be a no-op.
	if a == nil || !s.eventsEnabled() || !s.gateway.enabled {
		s.mu.Unlock()
		return
	}
	s.sendLeafNodeConnectMsg(a.Name)
	s.mu.Unlock()
	s.switchAccountToInterestMode(a.Name)
}
// sendLeafNodeConnectMsg publishes the leafnode connect system event for the
// named account. Lock should be held.
func (s *Server) sendLeafNodeConnectMsg(accName string) {
	req := accNumConnsReq{Account: accName}
	subject := fmt.Sprintf(leafNodeConnectEventSubj, accName)
	s.sendInternalMsg(subject, "", &req.Server, &req)
}
// sendAccConnsUpdate is called to send out our information on the
// account's local connections.
// Lock should be held on entry.
//
// NOTE: the server lock is released while the account lock is held and
// re-acquired just before returning, and the account lock may itself be
// dropped temporarily if the send queue is full. Callers must not assume
// server state is unchanged across this call.
func (s *Server) sendAccConnsUpdate(a *Account, subj ...string) {
	if !s.eventsEnabled() || a == nil {
		return
	}
	sendQ := s.sys.sendq
	if sendQ == nil {
		return
	}
	// Build event with account name and number of local clients and leafnodes.
	a.mu.Lock()
	s.mu.Unlock()
	localConns := a.numLocalConnections()
	m := &AccountNumConns{
		Account:    a.Name,
		Conns:      localConns,
		LeafNodes:  a.numLocalLeafNodes(),
		TotalConns: localConns + a.numLocalLeafNodes(),
	}
	// Set timer to fire again unless we are at zero.
	if localConns == 0 {
		clearTimer(&a.ctmr)
	} else {
		// Check to see if we have an HB running and update.
		if a.ctmr == nil {
			a.ctmr = time.AfterFunc(eventsHBInterval, func() { s.accConnsUpdate(a) })
		} else {
			a.ctmr.Reset(eventsHBInterval)
		}
	}
	for _, sub := range subj {
		msg := &pubMsg{nil, sub, _EMPTY_, &m.Server, &m, false}
		select {
		case sendQ <- msg:
		default:
			// Queue full: drop the account lock for the blocking send so the
			// send loop can make progress, then re-acquire.
			a.mu.Unlock()
			sendQ <- msg
			a.mu.Lock()
		}
	}
	a.mu.Unlock()
	s.mu.Lock()
}
// accConnsUpdate is called whenever there is a change to the account's
// number of active connections, or during a heartbeat. It publishes the
// update on both the old and new connection-event subjects.
func (s *Server) accConnsUpdate(a *Account) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if a == nil || !s.eventsEnabled() {
		return
	}
	oldSubj := fmt.Sprintf(accConnsEventSubjOld, a.Name)
	newSubj := fmt.Sprintf(accConnsEventSubjNew, a.Name)
	s.sendAccConnsUpdate(a, oldSubj, newSubj)
}
// nextEventID returns a new unique event id.
// server lock should be held
func (s *Server) nextEventID() string {
	return s.eventIds.Next()
}
// accountConnectEvent will send an account client connect event if there is interest.
// This is a billing event. Activity on the global account is ignored.
func (s *Server) accountConnectEvent(c *client) {
	s.mu.Lock()
	if !s.eventsEnabled() {
		s.mu.Unlock()
		return
	}
	gacc := s.gacc
	eid := s.nextEventID()
	s.mu.Unlock()
	c.mu.Lock()
	// Ignore global account activity
	if c.acc == nil || c.acc == gacc {
		c.mu.Unlock()
		return
	}
	m := ConnectEventMsg{
		TypedEvent: TypedEvent{
			Type: ConnectEventMsgType,
			ID:   eid,
			Time: time.Now().UTC(),
		},
		Client: ClientInfo{
			Start:   c.start,
			Host:    c.host,
			ID:      c.cid,
			Account: accForClient(c),
			User:    c.getRawAuthUser(),
			Name:    c.opts.Name,
			Lang:    c.opts.Lang,
			Version: c.opts.Version,
		},
	}
	// Capture the account name while still holding the client lock; reading
	// c.acc after the unlock would race with concurrent account changes
	// (accountDisconnectEvent captures it the same way).
	accName := c.acc.Name
	c.mu.Unlock()
	subj := fmt.Sprintf(connectEventSubj, accName)
	s.sendInternalMsgLocked(subj, _EMPTY_, &m.Server, &m)
}
// accountDisconnectEvent will send an account client disconnect event if there is interest.
// This is a billing event. Activity on the global account is ignored.
// Sent reports what the client sent to us (our inMsgs/inBytes, loaded
// atomically — presumably these counters are updated concurrently by the read
// loop); Received reports what we delivered to the client.
func (s *Server) accountDisconnectEvent(c *client, now time.Time, reason string) {
	s.mu.Lock()
	if !s.eventsEnabled() {
		s.mu.Unlock()
		return
	}
	gacc := s.gacc
	eid := s.nextEventID()
	s.mu.Unlock()
	c.mu.Lock()
	// Ignore global account activity
	if c.acc == nil || c.acc == gacc {
		c.mu.Unlock()
		return
	}
	m := DisconnectEventMsg{
		TypedEvent: TypedEvent{
			Type: DisconnectEventMsgType,
			ID:   eid,
			Time: now.UTC(),
		},
		Client: ClientInfo{
			Start:   c.start,
			Stop:    &now,
			Host:    c.host,
			ID:      c.cid,
			Account: accForClient(c),
			User:    c.getRawAuthUser(),
			Name:    c.opts.Name,
			Lang:    c.opts.Lang,
			Version: c.opts.Version,
			RTT:     c.getRTT(),
		},
		Sent: DataStats{
			Msgs:  atomic.LoadInt64(&c.inMsgs),
			Bytes: atomic.LoadInt64(&c.inBytes),
		},
		Received: DataStats{
			Msgs:  c.outMsgs,
			Bytes: c.outBytes,
		},
		Reason: reason,
	}
	// Capture the account name under the client lock.
	accName := c.acc.Name
	c.mu.Unlock()
	subj := fmt.Sprintf(disconnectEventSubj, accName)
	s.sendInternalMsgLocked(subj, _EMPTY_, &m.Server, &m)
}
// sendAuthErrorEvent sends a disconnect event for a client that failed
// authentication, with reason AuthenticationViolation, on this server's
// auth error subject.
func (s *Server) sendAuthErrorEvent(c *client) {
	s.mu.Lock()
	if !s.eventsEnabled() {
		s.mu.Unlock()
		return
	}
	eid := s.nextEventID()
	s.mu.Unlock()
	now := time.Now()
	c.mu.Lock()
	m := DisconnectEventMsg{
		TypedEvent: TypedEvent{
			Type: DisconnectEventMsgType,
			ID:   eid,
			Time: now.UTC(),
		},
		Client: ClientInfo{
			Start:   c.start,
			Stop:    &now,
			Host:    c.host,
			ID:      c.cid,
			Account: accForClient(c),
			User:    c.getRawAuthUser(),
			Name:    c.opts.Name,
			Lang:    c.opts.Lang,
			Version: c.opts.Version,
			RTT:     c.getRTT(),
		},
		// inMsgs/inBytes are loaded atomically elsewhere (see
		// accountDisconnectEvent), so do the same here to avoid a race
		// with concurrent updates.
		Sent: DataStats{
			Msgs:  atomic.LoadInt64(&c.inMsgs),
			Bytes: atomic.LoadInt64(&c.inBytes),
		},
		Received: DataStats{
			Msgs:  c.outMsgs,
			Bytes: c.outBytes,
		},
		Reason: AuthenticationViolation.String(),
	}
	c.mu.Unlock()
	s.mu.Lock()
	subj := fmt.Sprintf(authErrorEventSubj, s.info.ID)
	s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
	s.mu.Unlock()
}
// msgHandler is an internal message callback. If the msg is needed past the
// callback it is required to be copied.
type msgHandler func(sub *subscription, client *client, subject, reply string, msg []byte)

// sysSubscribe creates an internal subscription. Use sysSubscribeQ for queue groups.
func (s *Server) sysSubscribe(subject string, cb msgHandler) (*subscription, error) {
	return s.systemSubscribe(subject, "", false, cb)
}

// sysSubscribeQ creates an internal subscription with a queue group.
func (s *Server) sysSubscribeQ(subject, queue string, cb msgHandler) (*subscription, error) {
	return s.systemSubscribe(subject, queue, false, cb)
}

// sysSubscribeInternal creates an internal subscription but does not forward interest.
func (s *Server) sysSubscribeInternal(subject string, cb msgHandler) (*subscription, error) {
	return s.systemSubscribe(subject, "", true, cb)
}
// systemSubscribe creates a subscription on the internal system client.
// queue may be empty for a plain subscription; internalOnly suppresses
// forwarding of interest to the rest of the system. Subscription ids come
// from the server-wide s.sys.sid counter.
// Returns ErrNoSysAccount when eventing is not enabled.
func (s *Server) systemSubscribe(subject, queue string, internalOnly bool, cb msgHandler) (*subscription, error) {
	if !s.eventsEnabled() {
		return nil, ErrNoSysAccount
	}
	if cb == nil {
		return nil, fmt.Errorf("undefined message handler")
	}
	s.mu.Lock()
	c := s.sys.client
	trace := c.trace
	s.sys.sid++
	sid := strconv.Itoa(s.sys.sid)
	s.mu.Unlock()
	// Now create the subscription
	if trace {
		c.traceInOp("SUB", []byte(subject+" "+queue+" "+sid))
	}
	var q []byte
	if queue != "" {
		q = []byte(queue)
	}
	return c.processSub([]byte(subject), q, []byte(sid), cb, internalOnly)
}
// sysUnsubscribe removes a subscription previously created on the internal
// system client. A nil sub or disabled eventing is a no-op.
func (s *Server) sysUnsubscribe(sub *subscription) {
	if sub == nil || !s.eventsEnabled() {
		return
	}
	s.mu.Lock()
	acc, c := s.sys.account, s.sys.client
	s.mu.Unlock()
	c.unsubscribe(acc, sub, true, true)
}
// remoteLatencySubjectForResponse generates the tracking subject for remote
// latency from the response subject; it returns "" for untracked replies.
func remoteLatencySubjectForResponse(subject []byte) string {
	if !isTrackedReply(subject) {
		return ""
	}
	// The second-to-last token carries the origin server hash.
	parts := bytes.Split(subject, []byte(tsep))
	// FIXME(dlc) - Sprintf may become a performance concern at some point.
	return fmt.Sprintf(remoteLatencyEventSubj, parts[len(parts)-2])
}
// remoteLatencyUpdate is used to track remote latency measurements for tracking on exported services.
// Each latency result needs two halves (m1 from the responder side, m2 from
// this measurement); whichever side arrives second performs the merge and
// publishes the metric, then removes the response service import.
func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, subject, _ string, msg []byte) {
	if !s.eventsRunning() {
		return
	}
	rl := remoteLatency{}
	if err := json.Unmarshal(msg, &rl); err != nil {
		s.Errorf("Error unmarshalling remote latency measurement: %v", err)
		return
	}
	// Now we need to look up the responseServiceImport associated with this measurement.
	acc, err := s.LookupAccount(rl.Account)
	if err != nil {
		s.Warnf("Could not lookup account %q for latency measurement", rl.Account)
		return
	}
	// Now get the request id / reply. We need to see if we have a GW prefix and if so strip that off.
	reply := rl.ReqId
	if gwPrefix, old := isGWRoutedSubjectAndIsOldPrefix([]byte(reply)); gwPrefix {
		reply = string(getSubjectFromGWRoutedReply([]byte(reply), old))
	}
	acc.mu.RLock()
	si := acc.exports.responses[reply]
	if si == nil {
		acc.mu.RUnlock()
		return
	}
	m1 := si.m1
	m2 := rl.M2
	lsub := si.latency.subject
	acc.mu.RUnlock()
	// So we have not processed the response tracking measurement yet.
	if m1 == nil {
		si.acc.mu.Lock()
		// Double check since could have slipped in.
		m1 = si.m1
		if m1 == nil {
			// Store our value there for them to pick up.
			si.m1 = &m2
		}
		si.acc.mu.Unlock()
		// Still only one half: the other side will complete the merge later.
		if m1 == nil {
			return
		}
	}
	// Calculate the correct latencies given M1 and M2.
	m1.merge(&m2)
	// Clear the requesting client since we send the result here.
	acc.mu.Lock()
	si.rc = nil
	acc.mu.Unlock()
	// Make sure we remove the entry here.
	acc.removeServiceImport(si.from)
	// Send the metrics
	s.sendInternalAccountMsg(acc, lsub, m1)
}
// inboxReply is used for all inbox replies so that we do not send supercluster
// wide interest updates for every request. Same trick used in modern NATS
// clients: a single wildcard subscription multiplexes per-request handlers
// stored in s.sys.replies.
func (s *Server) inboxReply(sub *subscription, c *client, subject, reply string, msg []byte) {
	s.mu.Lock()
	var cb msgHandler
	if s.eventsEnabled() && s.sys.replies != nil {
		cb = s.sys.replies[subject]
	}
	s.mu.Unlock()
	if cb != nil {
		cb(sub, c, subject, reply, msg)
	}
}
// Copied from go client.
// We could use serviceReply here instead to save some code.
// I prefer these semantics for the moment, when tracing you know what this is.
const (
	InboxPrefix        = "$SYS._INBOX."
	inboxPrefixLen     = len(InboxPrefix)
	respInboxPrefixLen = inboxPrefixLen + sysHashLen + 1 // prefix + server hash + separator
	replySuffixLen     = 8                               // Gives us 62^8
)
// newRespInbox creates an internal inbox used for replies that will be
// processed by the global wc handler (inboxReply). The result is the
// server's inbox prefix followed by a fixed-length suffix encoding a random
// 63-bit value using the package's digits/base alphabet.
func (s *Server) newRespInbox() string {
	var b [respInboxPrefixLen + replySuffixLen]byte
	pres := b[:respInboxPrefixLen]
	copy(pres, s.sys.inboxPre)
	rn := rand.Int63()
	// Fill the suffix digit by digit from the random value.
	for i, l := respInboxPrefixLen, rn; i < len(b); i++ {
		b[i] = digits[l%base]
		l /= base
	}
	return string(b[:])
}
// accNumSubsReq is sent when we need to gather remote info on subs.
// Queue is optional; when set, only queue subscriptions in that group count.
type accNumSubsReq struct {
	Account string `json:"acc"`
	Subject string `json:"subject"`
	Queue   []byte `json:"queue,omitempty"`
}
// totalSubs totals the eligible subscriptions in a sublist match result.
// Only client and hub-leafnode subscriptions count; when qg is non-nil, only
// queue subs in that group are considered (plain subs are skipped entirely).
func totalSubs(rr *SublistResult, qg []byte) (nsubs int32) {
	if rr == nil {
		return 0
	}
	countIfEligible := func(sub *subscription) {
		// TODO(dlc) - This could be smarter.
		if qg != nil && !bytes.Equal(qg, sub.queue) {
			return
		}
		if sub.client.kind == CLIENT || sub.client.isHubLeafNode() {
			nsubs++
		}
	}
	if qg == nil {
		for _, sub := range rr.psubs {
			countIfEligible(sub)
		}
	}
	for _, qsub := range rr.qsubs {
		for _, sub := range qsub {
			countIfEligible(sub)
		}
	}
	return nsubs
}
// debugSubscribers allows users of large systems to debug active subscribers
// for a given subject. Payload should be the subject of interest, optionally
// followed by a queue group separated by whitespace. Local counts are taken
// first; if remote responses are expected, they are gathered asynchronously
// (with a 500ms deadline) before the total is sent back on reply.
func (s *Server) debugSubscribers(sub *subscription, c *client, subject, reply string, msg []byte) {
	// Even though this is an internal only subscription, meaning interest was not forwarded, we could
	// get one here from a GW in optimistic mode. Ignore for now.
	// FIXME(dlc) - Should we send no interest here back to the GW?
	if c.kind != CLIENT {
		return
	}
	var nsubs int32
	// We could have a single subject or we could have a subject and a wildcard separated by whitespace.
	args := strings.Split(strings.TrimSpace(string(msg)), " ")
	// NOTE(review): strings.Split never returns an empty slice, so this guard
	// looks unreachable; an empty payload yields args == [""] — confirm intent.
	if len(args) == 0 {
		s.sendInternalAccountMsg(c.acc, reply, 0)
		return
	}
	tsubj := args[0]
	var qgroup []byte
	if len(args) > 1 {
		qgroup = []byte(args[1])
	}
	if subjectIsLiteral(tsubj) {
		// We will look up subscribers locally first then determine if we need to solicit other servers.
		rr := c.acc.sl.Match(tsubj)
		nsubs = totalSubs(rr, qgroup)
	} else {
		// We have a wildcard, so this is a bit slower path.
		var _subs [32]*subscription
		subs := _subs[:0]
		c.acc.sl.All(&subs)
		for _, sub := range subs {
			if subjectIsSubsetMatch(string(sub.subject), tsubj) {
				if qgroup != nil && !bytes.Equal(qgroup, sub.queue) {
					continue
				}
				if sub.client.kind == CLIENT || sub.client.isHubLeafNode() {
					nsubs++
				}
			}
		}
	}
	// We should have an idea of how many responses to expect from remote servers.
	var expected = c.acc.expectedRemoteResponses()
	// If we are only local, go ahead and return.
	if expected == 0 {
		s.sendInternalAccountMsg(c.acc, reply, nsubs)
		return
	}
	// We need to solicit from others.
	// To track status.
	responses := int32(0)
	done := make(chan (bool))
	s.mu.Lock()
	// Create direct reply inbox that we multiplex under the WC replies.
	replySubj := s.newRespInbox()
	// Store our handler.
	s.sys.replies[replySubj] = func(sub *subscription, _ *client, subject, _ string, msg []byte) {
		if n, err := strconv.Atoi(string(msg)); err == nil {
			atomic.AddInt32(&nsubs, int32(n))
		}
		// Signal (non-blocking) once all expected responses have arrived.
		if atomic.AddInt32(&responses, 1) >= expected {
			select {
			case done <- true:
			default:
			}
		}
	}
	// Send the request to the other servers.
	request := &accNumSubsReq{
		Account: c.acc.Name,
		Subject: tsubj,
		Queue:   qgroup,
	}
	s.sendInternalMsg(accNumSubsReqSubj, replySubj, nil, request)
	s.mu.Unlock()
	// FIXME(dlc) - We should rate limit here instead of blind Go routine.
	go func() {
		select {
		case <-done:
		case <-time.After(500 * time.Millisecond):
		}
		// Cleanup the WC entry.
		s.mu.Lock()
		delete(s.sys.replies, replySubj)
		s.mu.Unlock()
		// Send the response.
		s.sendInternalAccountMsg(c.acc, reply, atomic.LoadInt32(&nsubs))
	}()
}
// nsubsRequest handles a request for our local subscription count. This will
// come from a remote origin server that received the initial request
// (debugSubscribers). Accounts without local or leaf connections are ignored.
func (s *Server) nsubsRequest(sub *subscription, _ *client, subject, reply string, msg []byte) {
	if !s.eventsRunning() {
		return
	}
	m := accNumSubsReq{}
	if err := json.Unmarshal(msg, &m); err != nil {
		s.sys.client.Errorf("Error unmarshalling account nsubs request message: %v", err)
		return
	}
	// Grab account.
	acc, _ := s.lookupAccount(m.Account)
	if acc == nil || acc.numLocalAndLeafConnections() == 0 {
		return
	}
	// We will look up subscribers locally first then determine if we need to solicit other servers.
	var nsubs int32
	if subjectIsLiteral(m.Subject) {
		rr := acc.sl.Match(m.Subject)
		nsubs = totalSubs(rr, m.Queue)
	} else {
		// We have a wildcard, so this is a bit slower path.
		var _subs [32]*subscription
		subs := _subs[:0]
		acc.sl.All(&subs)
		for _, sub := range subs {
			if (sub.client.kind == CLIENT || sub.client.isHubLeafNode()) && subjectIsSubsetMatch(string(sub.subject), m.Subject) {
				if m.Queue != nil && !bytes.Equal(m.Queue, sub.queue) {
					continue
				}
				nsubs++
			}
		}
	}
	s.sendInternalMsgLocked(reply, _EMPTY_, nil, nsubs)
}
// accForClient returns the account name for a client, or "N/A" when the
// client has no account assigned.
func accForClient(c *client) string {
	if c.acc == nil {
		return "N/A"
	}
	return c.acc.Name
}
// Helper to clear timers.
func clearTimer(tp **time.Timer) {
if t := *tp; t != nil {
t.Stop()
*tp = nil
}
}
// wrapChk is a helper function to wrap functions with a common test:
// the returned closure runs f under the server lock, and becomes a no-op
// when eventing is not enabled.
func (s *Server) wrapChk(f func()) func() {
	return func() {
		s.mu.Lock()
		if !s.eventsEnabled() {
			s.mu.Unlock()
			return
		}
		f()
		s.mu.Unlock()
	}
}
| 1 | 11,533 | But why introduce publish on `accConnsEventSubjNew` subject if we never subscribe on that subject. What is the plan then? Replace subscription on "old" with "new" at one point? Not sure about all that.. | nats-io-nats-server | go |
@@ -269,7 +269,7 @@ void *ap6_thread(void *thread_context)
int ret;
struct timespec ts = { .tv_sec = 0, .tv_nsec = 100000 }; /* 100ms */
- fpga_token fme_token;
+ fpga_token fme_token = NULL;
fpga_handle fme_handle;
fpga_properties filter;
fpga_result res; | 1 | // Copyright(c) 2017, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
/*
* ap6.c : handles NULL bitstream programming on AP6
*/
#include <opae/fpga.h>
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include "ap6.h"
#include "config_int.h"
#include "log.h"
#include "safe_string/safe_string.h"
#include "fpgaconf/bitstream-tools.h"
/*
 * macro to check FPGA return codes, print error message, and goto cleanup label
 * NOTE: this changes the program flow (uses goto)!
 * Relies on a variable `c` (struct ap6_context *) being in scope for
 * the socket id used in the log message.
 */
#define ON_GOTO(cond, label, desc, ...) \
	do { \
		if (cond) { \
			dlog("ap6[%i]: " desc "\n", c->socket, ## __VA_ARGS__); \
			goto label; \
		} \
	} while (0)
/* One semaphore per socket; ap6_thread waits on its socket's entry
 * (posted elsewhere — presumably by the AP6 event monitor; see the
 * sem_timedwait in ap6_thread). */
sem_t ap6_sem[MAX_SOCKETS];

/* In-memory representation of a GBS bitstream file. */
struct bitstream_info {
	const char *filename;    /* path the bitstream was read from */
	uint8_t *data;           /* raw file contents (malloc'd) */
	size_t data_len;         /* size of data in bytes */
	uint8_t *rbf_data;       /* points into data past the legacy header */
	size_t rbf_len;          /* size of rbf_data in bytes */
	fpga_guid interface_id;  /* PR interface id parsed from metadata */
};
/*
 * Check for bitstream header and fill out bitstream_info fields
 */
#define MAGIC 0x1d1f8680
#define MAGIC_SIZE 4
#define HEADER_SIZE 20

/*
 * Validate the legacy GBS header at the start of info->data and fill in
 * interface_id, rbf_data and rbf_len.  Returns 0 on success, -EINVAL on
 * a NULL argument, -1 on a short or invalid header.
 */
int parse_metadata(struct bitstream_info *info)
{
	unsigned idx;

	if (!info)
		return -EINVAL;

	if (info->data_len < HEADER_SIZE) {
		fprintf(stderr, "File too small to be GBS\n");
		return -1;
	}

	if (((uint32_t *)info->data)[0] != MAGIC) {
		fprintf(stderr, "No valid GBS header\n");
		return -1;
	}

	/* the interface id is stored in reverse byte order in the file */
	for (idx = 0; idx < sizeof(info->interface_id); idx++)
		info->interface_id[idx] =
			info->data[MAGIC_SIZE + sizeof(info->interface_id) - 1 - idx];

	info->rbf_data = &info->data[HEADER_SIZE];
	info->rbf_len = info->data_len - HEADER_SIZE;

	return 0;
}
/*
 * Read the PR interface id from the JSON metadata embedded in a bitstream.
 *
 * On success *guid is filled in and FPGA_OK is returned.  A bitstream
 * with a valid GUID but no metadata is not an error (FPGA_OK, *guid
 * untouched).
 */
fpga_result get_bitstream_ifc_id(const uint8_t *bitstream, fpga_guid *guid)
{
	fpga_result result = FPGA_EXCEPTION;
	char *json_metadata = NULL;
	uint32_t json_len = 0;
	const uint8_t *json_metadata_ptr = NULL;
	json_object *root = NULL;
	json_object *afu_image = NULL;
	json_object *interface_id = NULL;
	errno_t e;

	if (check_bitstream_guid(bitstream) != FPGA_OK)
		goto out_free;

	json_len = read_int_from_bitstream(bitstream + METADATA_GUID_LEN, sizeof(uint32_t));
	if (json_len == 0) {
		OPAE_MSG("Bitstream has no metadata");
		result = FPGA_OK;
		goto out_free;
	}

	json_metadata_ptr = bitstream + METADATA_GUID_LEN + sizeof(uint32_t);

	json_metadata = (char *) malloc(json_len + 1);
	if (json_metadata == NULL) {
		OPAE_ERR("Could not allocate memory for metadata!");
		/* take the common exit path (nothing allocated yet) instead of
		 * returning directly, for consistency with the other failures */
		result = FPGA_NO_MEMORY;
		goto out_free;
	}

	e = memcpy_s(json_metadata, json_len+1,
			json_metadata_ptr, json_len);
	if (EOK != e) {
		OPAE_ERR("memcpy_s failed");
		result = FPGA_EXCEPTION;
		goto out_free;
	}
	json_metadata[json_len] = '\0';

	root = json_tokener_parse(json_metadata);

	/* if the JSON fails to parse, result stays FPGA_EXCEPTION */
	if (root != NULL) {
		if (json_object_object_get_ex(root, GBS_AFU_IMAGE, &afu_image)) {
			json_object_object_get_ex(afu_image, BBS_INTERFACE_ID, &interface_id);

			if (interface_id == NULL) {
				OPAE_ERR("Invalid metadata");
				result = FPGA_INVALID_PARAM;
				goto out_free;
			}

			result = string_to_guid(json_object_get_string(interface_id), guid);
			if (result != FPGA_OK) {
				OPAE_ERR("Invalid BBS interface id ");
				goto out_free;
			}
		} else {
			OPAE_ERR("Invalid metadata");
			result = FPGA_INVALID_PARAM;
			goto out_free;
		}
	}

out_free:
	if (root)
		json_object_put(root);
	if (json_metadata)
		free(json_metadata);

	return result;
}
/*
 * Read bitstream from file and populate bitstream_info structure.
 * Returns 0 on success, -EINVAL on NULL arguments, -1 on I/O or
 * metadata errors (info->data is freed and NULLed on failure).
 */
//TODO: remove this check when all bitstreams conform to JSON
//metadata spec.
static bool skip_header_checks;
int read_bitstream(const char *filename, struct bitstream_info *info)
{
	FILE *f;
	long len;
	int ret;

	if (!filename || !info)
		return -EINVAL;

	info->filename = filename;

	/* open file */
	f = fopen(filename, "rb");
	if (!f) {
		perror(filename);
		return -1;
	}

	/* get filesize */
	ret = fseek(f, 0, SEEK_END);
	if (ret < 0) {
		perror(filename);
		goto out_close;
	}
	len = ftell(f);
	if (len < 0) {
		perror(filename);
		goto out_close;
	}

	/* allocate memory */
	info->data = (uint8_t *)malloc(len);
	if (!info->data) {
		perror("malloc");
		goto out_close;
	}

	/* read bitstream data */
	ret = fseek(f, 0, SEEK_SET);
	if (ret < 0) {
		perror(filename);
		goto out_free;
	}
	info->data_len = fread(info->data, 1, len, f);
	if (ferror(f)) {
		perror(filename);
		goto out_free;
	}
	if (info->data_len != (size_t)len) {
		fprintf(stderr,
		     "Filesize and number of bytes read don't match\n");
		goto out_free;
	}

	/* JSON-metadata bitstreams bypass the legacy header parsing below */
	if (check_bitstream_guid(info->data) == FPGA_OK) {
		skip_header_checks = true;
		/* (removed leftover debug printf that leaked to stdout) */

		if (get_bitstream_ifc_id(info->data, &(info->interface_id))
			!= FPGA_OK) {
			fprintf(stderr, "Invalid metadata in the bitstream\n");
			goto out_free;
		}
	}

	if (!skip_header_checks) {
		/* populate remaining bitstream_info fields */
		ret = parse_metadata(info);
		if (ret < 0)
			goto out_free;
	}

	fclose(f);
	return 0;

out_free:
	if (info->data)
		free((void *)info->data);
	info->data = NULL;
out_close:
	fclose(f);
	return -1;
}
/*
 * AP6 handler thread: locate an FPGA on this socket matching one of the
 * configured NULL bitstreams, then loop waiting for AP6 events and
 * program the NULL bitstream when one arrives.
 */
void *ap6_thread(void *thread_context)
{
	struct ap6_context *c = (struct ap6_context *)thread_context;
	unsigned i;
	int ret;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100000 }; /* 100ms */

	/* NULL-initialize so we never pass an uninitialized token to the
	 * FPGA API on early-exit paths */
	fpga_token fme_token = NULL;
	fpga_handle fme_handle;
	fpga_properties filter;
	fpga_result res;
	uint32_t num_matches = 0;
	struct bitstream_info null_gbs_info;
	memset_s(&null_gbs_info, sizeof(null_gbs_info), 0);

	ON_GOTO(c->config->num_null_gbs == 0, out_exit, "no NULL bitstreams registered.");

	res = fpgaGetProperties(NULL, &filter);
	ON_GOTO(res != FPGA_OK, out_exit, "enumeration failed");

	for (i = 0; i < c->config->num_null_gbs; i++) {
		ret = read_bitstream(c->config->null_gbs[i], &null_gbs_info);
		if (ret < 0) {
			dlog("ap6[%i]: \tfailed to read bitstream\n", c->socket);
			if (null_gbs_info.data)
				free((void *)null_gbs_info.data);
			null_gbs_info.data = NULL;
			continue;
		}

		res = fpgaClearProperties(filter);
		ON_GOTO(res != FPGA_OK, out_destroy_filter, "enumeration failed");

		res = fpgaPropertiesSetObjectType(filter, FPGA_DEVICE);
		res += fpgaPropertiesSetSocketID(filter, c->socket);
		res += fpgaPropertiesSetGUID(filter, null_gbs_info.interface_id);
		ON_GOTO(res != FPGA_OK, out_destroy_filter, "enumeration failed");

		res = fpgaEnumerate(&filter, 1, &fme_token, 1, &num_matches);
		ON_GOTO(res != FPGA_OK, out_destroy_filter, "enumeration failed");

		if (num_matches > 0)
			break;

		/* no FPGA matched this bitstream; release its data before
		 * reading the next one (read_bitstream would otherwise
		 * overwrite the pointer and leak it) */
		free((void *)null_gbs_info.data);
		null_gbs_info.data = NULL;
	}

	res = fpgaDestroyProperties(&filter);
	ON_GOTO(res != FPGA_OK, out_exit, "enumeration failed");

	/* if we didn't find a matching FPGA, bail out */
	if (i == c->config->num_null_gbs)
		goto out_exit;

	/* now, fme_token holds the token for an FPGA on our socket matching the
	 * interface ID of the NULL GBS */
	dlog("ap6[%i]: waiting for AP6, will write the following bitstream: \"%s\"\n", c->socket, c->config->null_gbs[i]);

	while (c->config->running) {
		/* wait for event */
		ret = sem_timedwait(&ap6_sem[c->socket], &ts);

		/* if AP6 */
		if (ret == 0) {
			/* program NULL bitstream */
			dlog("ap6[%i]: writing NULL bitstreams.\n", c->socket);
			res = fpgaOpen(fme_token, &fme_handle, 0);
			if (res != FPGA_OK) {
				dlog("ap6[%i]: failed to open FPGA.\n", c->socket);
				/* TODO: retry? */
				continue;
			}

			res = fpgaReconfigureSlot(fme_handle, 0, null_gbs_info.data, null_gbs_info.data_len, 0);
			if (res != FPGA_OK) {
				dlog("ap6[%i]: failed to write bitstream.\n", c->socket);
				/* TODO: retry? */
			}

			res = fpgaClose(fme_handle);
			if (res != FPGA_OK) {
				dlog("ap6[%i]: failed to close FPGA.\n", c->socket);
			}
		}
	}

out_exit:
	if (fme_token)
		fpgaDestroyToken(&fme_token);
	if (null_gbs_info.data)
		free(null_gbs_info.data);
	return NULL;

out_destroy_filter:
	fpgaDestroyProperties(&filter);
	goto out_exit;
}
| 1 | 17,522 | can we use `nullptr` for consistency? | OPAE-opae-sdk | c |
@@ -38,7 +38,7 @@ class DomainRouterFactory
protected $routerConfiguration;
/**
- * @var \Symfony\Component\Routing\Router[]
+ * @var \Shopsys\FrameworkBundle\Component\Router\DomainRouter[]
*/
protected $routersByDomainId = [];
| 1 | <?php
namespace Shopsys\FrameworkBundle\Component\Router;
use Shopsys\FrameworkBundle\Component\Domain\Config\DomainConfig;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\FriendlyUrlRouterFactory;
use Symfony\Component\Config\Loader\LoaderInterface;
use Symfony\Component\HttpFoundation\RequestStack;
use Symfony\Component\Routing\RequestContext;
use Symfony\Component\Routing\Router;
/**
 * Builds and caches one DomainRouter per domain, composing the basic
 * Symfony router, a locale-specific router and a friendly-URL router.
 */
class DomainRouterFactory
{
    /**
     * @var \Shopsys\FrameworkBundle\Component\Router\LocalizedRouterFactory
     */
    protected $localizedRouterFactory;

    /**
     * @var \Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\FriendlyUrlRouterFactory
     */
    protected $friendlyUrlRouterFactory;

    /**
     * @var \Shopsys\FrameworkBundle\Component\Domain\Domain
     */
    protected $domain;

    /**
     * @var \Symfony\Component\Config\Loader\LoaderInterface
     */
    protected $configLoader;

    /**
     * @var string
     */
    protected $routerConfiguration;

    /**
     * Cache of created routers, keyed by domain id.
     *
     * @var \Shopsys\FrameworkBundle\Component\Router\DomainRouter[]
     */
    protected $routersByDomainId = [];

    /**
     * @var \Symfony\Component\HttpFoundation\RequestStack
     */
    protected $requestStack;

    /**
     * @param mixed $routerConfiguration
     * @param \Symfony\Component\Config\Loader\LoaderInterface $configLoader
     * @param \Shopsys\FrameworkBundle\Component\Router\LocalizedRouterFactory $localizedRouterFactory
     * @param \Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\FriendlyUrlRouterFactory $friendlyUrlRouterFactory
     * @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain
     */
    public function __construct(
        $routerConfiguration,
        LoaderInterface $configLoader,
        LocalizedRouterFactory $localizedRouterFactory,
        FriendlyUrlRouterFactory $friendlyUrlRouterFactory,
        Domain $domain
    ) {
        $this->routerConfiguration = $routerConfiguration;
        $this->configLoader = $configLoader;
        $this->localizedRouterFactory = $localizedRouterFactory;
        $this->domain = $domain;
        $this->friendlyUrlRouterFactory = $friendlyUrlRouterFactory;
    }

    /**
     * @param \Symfony\Component\HttpFoundation\RequestStack $requestStack
     */
    public function setRequestStack(RequestStack $requestStack)
    {
        $this->requestStack = $requestStack;
    }

    /**
     * Returns the (cached) composed router for the given domain.
     *
     * @param int $domainId
     * @return \Shopsys\FrameworkBundle\Component\Router\DomainRouter
     */
    public function getRouter($domainId)
    {
        if (!array_key_exists($domainId, $this->routersByDomainId)) {
            try {
                $domainConfig = $this->domain->getDomainConfigById($domainId);
            } catch (\Shopsys\FrameworkBundle\Component\Domain\Exception\InvalidDomainIdException $exception) {
                throw new \Shopsys\FrameworkBundle\Component\Router\Exception\RouterNotResolvedException('', $exception);
            }
            $context = $this->getRequestContextByDomainConfig($domainConfig);
            $basicRouter = $this->getBasicRouter($domainConfig);
            $localizedRouter = $this->localizedRouterFactory->getRouter($domainConfig->getLocale(), $context);
            $friendlyUrlRouter = $this->friendlyUrlRouterFactory->createRouter($domainConfig, $context);
            $this->routersByDomainId[$domainId] = new DomainRouter(
                $context,
                $basicRouter,
                $localizedRouter,
                $friendlyUrlRouter
            );
        }

        return $this->routersByDomainId[$domainId];
    }

    /**
     * @param \Shopsys\FrameworkBundle\Component\Domain\Config\DomainConfig $domainConfig
     * @return \Symfony\Component\Routing\Router
     */
    protected function getBasicRouter(DomainConfig $domainConfig)
    {
        return new Router(
            $this->configLoader,
            $this->routerConfiguration,
            [],
            $this->getRequestContextByDomainConfig($domainConfig)
        );
    }

    /**
     * Builds a RequestContext from the domain URL (scheme, host, port,
     * base path), seeded from the current request when one exists.
     *
     * @param \Shopsys\FrameworkBundle\Component\Domain\Config\DomainConfig $domainConfig
     * @return \Symfony\Component\Routing\RequestContext
     */
    protected function getRequestContextByDomainConfig(DomainConfig $domainConfig)
    {
        $urlComponents = parse_url($domainConfig->getUrl());
        $requestContext = new RequestContext();

        $request = $this->requestStack->getCurrentRequest();

        if ($request !== null) {
            $requestContext->fromRequest($request);
        }

        if (array_key_exists('path', $urlComponents)) {
            $requestContext->setBaseUrl($urlComponents['path']);
        }

        $requestContext->setScheme($urlComponents['scheme']);
        $requestContext->setHost($urlComponents['host']);

        if (array_key_exists('port', $urlComponents)) {
            if ($urlComponents['scheme'] === 'http') {
                $requestContext->setHttpPort($urlComponents['port']);
            } elseif ($urlComponents['scheme'] === 'https') {
                $requestContext->setHttpsPort($urlComponents['port']);
            }
        }

        return $requestContext;
    }

    /**
     * @param \Shopsys\FrameworkBundle\Component\Domain\Config\DomainConfig $domainConfig
     * @return \Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\FriendlyUrlRouter
     */
    public function getFriendlyUrlRouter(DomainConfig $domainConfig)
    {
        $context = $this->getRequestContextByDomainConfig($domainConfig);

        return $this->friendlyUrlRouterFactory->createRouter($domainConfig, $context);
    }
}
| 1 | 16,305 | strange, for templating\EngineBundle it was `\Symfony\Bundle\FrameworkBundle\` | shopsys-shopsys | php |
@@ -1545,7 +1545,7 @@ ostree_sysroot_simple_write_deployment (OstreeSysroot *sysroot,
g_ptr_array_add (new_deployments, g_object_ref (deployment));
}
- if (!added_new)
+ if ((!added_new) && is_merge_or_booted)
{
g_ptr_array_add (new_deployments, g_object_ref (new_deployment));
added_new = TRUE; | 1 | /* -*- mode: C; c-file-style: "gnu"; indent-tabs-mode: nil; -*-
*
* Copyright (C) 2013 Colin Walters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include "otutil.h"
#include <sys/file.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include "ostree.h"
#include "ostree-core-private.h"
#include "ostree-repo-private.h"
#include "ostree-sepolicy-private.h"
#include "ostree-sysroot-private.h"
#include "ostree-deployment-private.h"
#include "ostree-bootloader-uboot.h"
#include "ostree-bootloader-syslinux.h"
#include "ostree-bootloader-grub2.h"
static gboolean
find_booted_deployment (OstreeSysroot *self,
GPtrArray *deployments,
OstreeDeployment **out_deployment,
GCancellable *cancellable,
GError **error);
/**
* SECTION:ostree-sysroot
* @title: Root partition mount point
* @short_description: Manage physical root filesystem
*
* A #OstreeSysroot object represents a physical root filesystem,
* which in particular should contain a toplevel /ostree directory.
* Inside this directory is an #OstreeRepo in /ostree/repo, plus a set
* of deployments in /ostree/deploy.
*
* This class is not by default safe against concurrent use by threads
* or external processes. You can use ostree_sysroot_lock() to
* perform locking externally.
*/
/* Class structure; carries the journal-msg signal vfunc slot. */
typedef struct {
  GObjectClass parent_class;

  /* Signals */
  void (*journal_msg) (OstreeSysroot *sysroot,
                       const char *msg);
} OstreeSysrootClass;

/* Signal ids for OstreeSysroot. */
enum {
  JOURNAL_MSG_SIGNAL,
  LAST_SIGNAL,
};

static guint signals[LAST_SIGNAL] = { 0 };

/* GObject property ids. */
enum {
  PROP_0,
  PROP_PATH
};

G_DEFINE_TYPE (OstreeSysroot, ostree_sysroot, G_TYPE_OBJECT)
/* GObject finalize: drop all refs, release the lock file and close the
 * sysroot fd (via ostree_sysroot_unload). */
static void
ostree_sysroot_finalize (GObject *object)
{
  OstreeSysroot *self = OSTREE_SYSROOT (object);

  g_clear_object (&self->path);
  g_clear_object (&self->repo);
  g_clear_pointer (&self->deployments, g_ptr_array_unref);
  g_clear_object (&self->booted_deployment);

  glnx_release_lock_file (&self->lock);

  ostree_sysroot_unload (self);

  G_OBJECT_CLASS (ostree_sysroot_parent_class)->finalize (object);
}
/* GObject property setter; only "path" (construct-only) is supported. */
static void
ostree_sysroot_set_property(GObject         *object,
                            guint            prop_id,
                            const GValue    *value,
                            GParamSpec      *pspec)
{
  OstreeSysroot *self = OSTREE_SYSROOT (object);

  switch (prop_id)
    {
    case PROP_PATH:
      self->path = g_value_dup_object (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
    }
}
/* GObject property getter; only "path" is supported. */
static void
ostree_sysroot_get_property(GObject         *object,
                            guint            prop_id,
                            GValue          *value,
                            GParamSpec      *pspec)
{
  OstreeSysroot *self = OSTREE_SYSROOT (object);

  switch (prop_id)
    {
    case PROP_PATH:
      g_value_set_object (value, self->path);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
    }
}
/* GObject constructed: default the path to "/" when none was given. */
static void
ostree_sysroot_constructed (GObject *object)
{
  OstreeSysroot *self = OSTREE_SYSROOT (object);

  /* Ensure the system root path is set. */
  if (self->path == NULL)
    self->path = g_object_ref (_ostree_get_default_sysroot_path ());

  G_OBJECT_CLASS (ostree_sysroot_parent_class)->constructed (object);
}
/* Class init: wire up the GObject vfuncs, install the "path" property
 * and register the journal-msg signal. */
static void
ostree_sysroot_class_init (OstreeSysrootClass *klass)
{
  GObjectClass *object_class = G_OBJECT_CLASS (klass);

  object_class->constructed = ostree_sysroot_constructed;
  object_class->get_property = ostree_sysroot_get_property;
  object_class->set_property = ostree_sysroot_set_property;
  object_class->finalize = ostree_sysroot_finalize;

  g_object_class_install_property (object_class,
                                   PROP_PATH,
                                   g_param_spec_object ("path",
                                                        "",
                                                        "",
                                                        G_TYPE_FILE,
                                                        G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY));

  /**
   * OstreeSysroot::journal-msg:
   * @self: Self
   * @msg: Human-readable string (should not contain newlines)
   *
   * libostree will log to the journal various events, such as the /etc merge
   * status, and transaction completion. Connect to this signal to also
   * synchronously receive the text for those messages. This is intended to be
   * used by command line tools which link to libostree as a library.
   *
   * Currently, the structured data is only available via the systemd journal.
   *
   * Since: 2017.10
   */
  signals[JOURNAL_MSG_SIGNAL] =
    g_signal_new ("journal-msg",
                  G_OBJECT_CLASS_TYPE (object_class),
                  G_SIGNAL_RUN_LAST,
                  G_STRUCT_OFFSET (OstreeSysrootClass, journal_msg),
                  NULL, NULL, NULL, G_TYPE_NONE, 1, G_TYPE_STRING);
}
/* Instance init: parse OSTREE_SYSROOT_DEBUG flags and set up the
 * not-yet-loaded state (fd == -1, unlocked). */
static void
ostree_sysroot_init (OstreeSysroot *self)
{
  const GDebugKey keys[] = {
    { "mutable-deployments", OSTREE_SYSROOT_DEBUG_MUTABLE_DEPLOYMENTS },
    { "test-fifreeze", OSTREE_SYSROOT_DEBUG_TEST_FIFREEZE },
    { "no-xattrs", OSTREE_SYSROOT_DEBUG_NO_XATTRS },
  };

  self->debug_flags = g_parse_debug_string (g_getenv ("OSTREE_SYSROOT_DEBUG"),
                                            keys, G_N_ELEMENTS (keys));

  self->sysroot_fd = -1;
  self->lock = (GLnxLockFile)GLNX_LOCK_FILE_INIT;
}
/**
 * ostree_sysroot_new:
 * @path: (allow-none): Path to a system root directory, or %NULL to use
 * the default ("/", possibly overridden internally)
 *
 * Returns: (transfer full): An accessor object for a system root located at @path
 */
OstreeSysroot*
ostree_sysroot_new (GFile *path)
{
  return g_object_new (OSTREE_TYPE_SYSROOT, "path", path, NULL);
}
/**
 * ostree_sysroot_new_default:
 *
 * Convenience wrapper for ostree_sysroot_new() with a %NULL path.
 *
 * Returns: (transfer full): An accessor for the current visible root / filesystem
 */
OstreeSysroot*
ostree_sysroot_new_default (void)
{
  return ostree_sysroot_new (NULL);
}
/**
 * ostree_sysroot_get_path:
 * @self: Sysroot
 *
 * Returns: (transfer none): Path to rootfs
 */
GFile *
ostree_sysroot_get_path (OstreeSysroot  *self)
{
  return self->path;
}
/* Lazily open a directory fd for the sysroot path; idempotent. */
static gboolean
ensure_sysroot_fd (OstreeSysroot          *self,
                   GError                **error)
{
  if (self->sysroot_fd == -1)
    {
      if (!glnx_opendirat (AT_FDCWD, gs_file_get_path_cached (self->path), TRUE,
                           &self->sysroot_fd, error))
        return FALSE;
    }
  return TRUE;
}
/**
 * ostree_sysroot_get_fd:
 * @self: Sysroot
 *
 * Access a file descriptor that refers to the root directory of this
 * sysroot.  ostree_sysroot_load() must have been invoked prior to
 * calling this function.
 *
 * Returns: A file descriptor valid for the lifetime of @self
 */
int
ostree_sysroot_get_fd (OstreeSysroot *self)
{
  g_return_val_if_fail (self->sysroot_fd != -1, -1);
  return self->sysroot_fd;
}
/* Bump the mtime on ostree/deploy so other processes watching the
 * sysroot can notice that the deployment set changed. */
gboolean
_ostree_sysroot_bump_mtime (OstreeSysroot *self,
                            GError **error)
{
  /* Allow other systems to monitor for changes */
  if (utimensat (self->sysroot_fd, "ostree/deploy", NULL, 0) < 0)
    {
      /* error prefix names the syscall actually used (was "futimens") */
      glnx_set_prefix_error_from_errno (error, "%s", "utimensat");
      return FALSE;
    }
  return TRUE;
}
/**
 * ostree_sysroot_unload:
 * @self: Sysroot
 *
 * Release any resources such as file descriptors referring to the
 * root directory of this sysroot.  Normally, those resources are
 * cleared by finalization, but in garbage collected languages that
 * may not be predictable.
 *
 * This undoes the effect of `ostree_sysroot_load()`.
 */
void
ostree_sysroot_unload (OstreeSysroot  *self)
{
  if (self->sysroot_fd != -1)
    {
      (void) close (self->sysroot_fd);
      self->sysroot_fd = -1;
    }
}
/**
 * ostree_sysroot_ensure_initialized:
 * @self: Sysroot
 * @cancellable: Cancellable
 * @error: Error
 *
 * Ensure that @self is set up as a valid rootfs, by creating
 * /ostree/repo (a bare-mode repository), among other things.
 * Safe to call on an already-initialized sysroot.
 */
gboolean
ostree_sysroot_ensure_initialized (OstreeSysroot  *self,
                                   GCancellable   *cancellable,
                                   GError        **error)
{
  if (!ensure_sysroot_fd (self, error))
    return FALSE;

  if (!glnx_shutil_mkdir_p_at (self->sysroot_fd, "ostree/repo", 0755,
                               cancellable, error))
    return FALSE;

  if (!glnx_shutil_mkdir_p_at (self->sysroot_fd, "ostree/deploy", 0755,
                               cancellable, error))
    return FALSE;

  g_autoptr(OstreeRepo) repo =
    ostree_repo_create_at (self->sysroot_fd, "ostree/repo",
                           OSTREE_REPO_MODE_BARE, NULL,
                           cancellable, error);
  if (!repo)
    return FALSE;
  return TRUE;
}
/* Emit the journal-msg signal (see ostree_sysroot_class_init). */
void
_ostree_sysroot_emit_journal_msg (OstreeSysroot  *self,
                                  const char     *msg)
{
  g_signal_emit (self, signals[JOURNAL_MSG_SIGNAL], 0, msg);
}
/* Parse a deployment directory name of the form CHECKSUM.TREESERIAL
 * into its checksum and integer serial parts.  The regex is compiled
 * once and cached (thread-safe via g_once_init). */
gboolean
_ostree_sysroot_parse_deploy_path_name (const char *name,
                                        char      **out_csum,
                                        int        *out_serial,
                                        GError    **error)
{

  static gsize regex_initialized;
  static GRegex *regex;
  if (g_once_init_enter (®ex_initialized))
    {
      regex = g_regex_new ("^([0-9a-f]+)\\.([0-9]+)$", 0, 0, NULL);
      g_assert (regex);
      g_once_init_leave (®ex_initialized, 1);
    }

  g_autoptr(GMatchInfo) match = NULL;
  if (!g_regex_match (regex, name, 0, &match))
    return glnx_throw (error, "Invalid deploy name '%s', expected CHECKSUM.TREESERIAL", name);

  g_autofree char *serial_str = g_match_info_fetch (match, 2);
  *out_csum = g_match_info_fetch (match, 1);
  *out_serial = (int)g_ascii_strtoll (serial_str, NULL, 10);
  return TRUE;
}
/* Determine the current sub-bootversion (0 or 1) by reading the
 * ostree/boot.N symlink; a missing link means subbootversion 0. */
gboolean
_ostree_sysroot_read_current_subbootversion (OstreeSysroot *self,
                                             int            bootversion,
                                             int           *out_subbootversion,
                                             GCancellable  *cancellable,
                                             GError       **error)
{
  if (!ensure_sysroot_fd (self, error))
    return FALSE;

  g_autofree char *ostree_bootdir_name = g_strdup_printf ("ostree/boot.%d", bootversion);
  struct stat stbuf;
  if (fstatat (self->sysroot_fd, ostree_bootdir_name, &stbuf, AT_SYMLINK_NOFOLLOW) != 0)
    {
      if (errno == ENOENT)
        *out_subbootversion = 0;
      else
        return glnx_throw_errno (error);
    }
  else
    {
      g_autofree char *current_subbootdir_name =
        glnx_readlinkat_malloc (self->sysroot_fd, ostree_bootdir_name,
                                cancellable, error);
      if (!current_subbootdir_name)
        return FALSE;

      /* the link target's ".0"/".1" suffix encodes the subbootversion */
      if (g_str_has_suffix (current_subbootdir_name, ".0"))
        *out_subbootversion = 0;
      else if (g_str_has_suffix (current_subbootdir_name, ".1"))
        *out_subbootversion = 1;
      else
        return glnx_throw (error, "Invalid target '%s' in %s",
                           current_subbootdir_name, ostree_bootdir_name);
    }

  return TRUE;
}
/* Order boot loader configs by their "version" key, newest first
 * (strverscmp result is negated).  A config with a version sorts
 * before one without.  NOTE(review): when both versions are missing
 * this returns 1 regardless of argument order, which is not a strict
 * weak ordering — presumably acceptable for the sorts it feeds; verify. */
static gint
compare_boot_loader_configs (OstreeBootconfigParser     *a,
                             OstreeBootconfigParser     *b)
{
  const char *a_version = ostree_bootconfig_parser_get (a, "version");
  const char *b_version = ostree_bootconfig_parser_get (b, "version");

  if (a_version && b_version)
    {
      int r = strverscmp (a_version, b_version);
      /* Reverse */
      return -r;
    }
  else if (a_version)
    return -1;
  else
    return 1;
}
/* GCompareFunc adapter for g_ptr_array_sort over bootconfig parsers. */
static int
compare_loader_configs_for_sorting (gconstpointer  a_pp,
                                    gconstpointer  b_pp)
{
  OstreeBootconfigParser *a = *((OstreeBootconfigParser**)a_pp);
  OstreeBootconfigParser *b = *((OstreeBootconfigParser**)b_pp);

  return compare_boot_loader_configs (a, b);
}
/* Read all ostree-*.conf boot loader entries from
 * boot/loader.BOOTVERSION/entries, returning them sorted newest-first.
 * A missing entries directory yields an empty array, not an error. */
gboolean
_ostree_sysroot_read_boot_loader_configs (OstreeSysroot *self,
                                          int            bootversion,
                                          GPtrArray    **out_loader_configs,
                                          GCancellable  *cancellable,
                                          GError       **error)
{
  if (!ensure_sysroot_fd (self, error))
    return FALSE;

  g_autoptr(GPtrArray) ret_loader_configs =
    g_ptr_array_new_with_free_func ((GDestroyNotify)g_object_unref);

  g_autofree char *entries_path = g_strdup_printf ("boot/loader.%d/entries", bootversion);
  gboolean entries_exists;
  g_auto(GLnxDirFdIterator) dfd_iter = { 0, };
  if (!ot_dfd_iter_init_allow_noent (self->sysroot_fd, entries_path,
                                     &dfd_iter, &entries_exists, error))
    return FALSE;
  if (!entries_exists)
    {
      /* Note early return */
      *out_loader_configs = g_steal_pointer (&ret_loader_configs);
      return TRUE;
    }

  while (TRUE)
    {
      struct dirent *dent;
      struct stat stbuf;

      if (!glnx_dirfd_iterator_next_dent (&dfd_iter, &dent, cancellable, error))
        return FALSE;

      if (dent == NULL)
        break;

      if (fstatat (dfd_iter.fd, dent->d_name, &stbuf, 0) != 0)
        return glnx_throw_errno (error);

      /* only regular files named ostree-*.conf are boot entries */
      if (g_str_has_prefix (dent->d_name, "ostree-") &&
          g_str_has_suffix (dent->d_name, ".conf") &&
          S_ISREG (stbuf.st_mode))
        {
          g_autoptr(OstreeBootconfigParser) config = ostree_bootconfig_parser_new ();

          if (!ostree_bootconfig_parser_parse_at (config, dfd_iter.fd, dent->d_name, cancellable, error))
            return glnx_prefix_error (error, "Parsing %s", dent->d_name);

          g_ptr_array_add (ret_loader_configs, g_object_ref (config));
        }
    }

  /* Callers expect us to give them a sorted array */
  g_ptr_array_sort (ret_loader_configs, compare_loader_configs_for_sorting);

  ot_transfer_out_value(out_loader_configs, &ret_loader_configs);
  return TRUE;
}
/* Determine the active bootversion (0 or 1) from the boot/loader
 * symlink; a missing link means bootversion 0. */
static gboolean
read_current_bootversion (OstreeSysroot *self,
                          int           *out_bootversion,
                          GCancellable  *cancellable,
                          GError       **error)
{
  int ret_bootversion;
  struct stat stbuf;

  if (fstatat (self->sysroot_fd, "boot/loader", &stbuf, AT_SYMLINK_NOFOLLOW) != 0)
    {
      if (errno != ENOENT)
        return glnx_throw_errno (error);
      ret_bootversion = 0;
    }
  else
    {
      if (!S_ISLNK (stbuf.st_mode))
        return glnx_throw (error, "Not a symbolic link: boot/loader");

      g_autofree char *target =
        glnx_readlinkat_malloc (self->sysroot_fd, "boot/loader", cancellable, error);
      if (!target)
        return FALSE;
      if (g_strcmp0 (target, "loader.0") == 0)
        ret_bootversion = 0;
      else if (g_strcmp0 (target, "loader.1") == 0)
        ret_bootversion = 1;
      else
        return glnx_throw (error, "Invalid target '%s' in boot/loader", target);
    }

  *out_bootversion = ret_bootversion;
  return TRUE;
}
/* Load the ../NAME.origin keyfile next to a deployment directory.
 * A missing origin file is not an error — an empty GKeyFile is
 * returned in that case. */
static gboolean
parse_origin (OstreeSysroot   *self,
              int              deployment_dfd,
              const char      *deployment_name,
              GKeyFile       **out_origin,
              GCancellable    *cancellable,
              GError         **error)
{
  g_autofree char *origin_path = g_strconcat ("../", deployment_name, ".origin", NULL);

  g_autoptr(GKeyFile) ret_origin = g_key_file_new ();

  struct stat stbuf;
  if (fstatat (deployment_dfd, origin_path, &stbuf, 0) != 0)
    {
      if (errno != ENOENT)
        return glnx_throw_errno (error);
    }
  else
    {
      g_autofree char *origin_contents =
        glnx_file_get_contents_utf8_at (deployment_dfd, origin_path,
                                        NULL, cancellable, error);
      if (!origin_contents)
        return FALSE;

      if (!g_key_file_load_from_data (ret_origin, origin_contents, -1, 0, error))
        return glnx_prefix_error (error, "Parsing %s", origin_path);
    }

  ot_transfer_out_value(out_origin, &ret_origin);
  return TRUE;
}
/* Parse an ostree= kernel argument of the form
 * /ostree/boot.BOOTVERSION/OSNAME/BOOTCSUM/TREESERIAL into its parts.
 * The regex is compiled once and cached. */
static gboolean
parse_bootlink (const char    *bootlink,
                int           *out_entry_bootversion,
                char         **out_osname,
                char         **out_bootcsum,
                int           *out_treebootserial,
                GError       **error)
{
  static gsize regex_initialized;
  static GRegex *regex;

  if (g_once_init_enter (®ex_initialized))
    {
      regex = g_regex_new ("^/ostree/boot.([01])/([^/]+)/([^/]+)/([0-9]+)$", 0, 0, NULL);
      g_assert (regex);
      g_once_init_leave (®ex_initialized, 1);
    }

  g_autoptr(GMatchInfo) match = NULL;
  if (!g_regex_match (regex, bootlink, 0, &match))
    return glnx_throw (error, "Invalid ostree= argument '%s', expected ostree=/ostree/boot.BOOTVERSION/OSNAME/BOOTCSUM/TREESERIAL", bootlink);

  g_autofree char *bootversion_str = g_match_info_fetch (match, 1);
  g_autofree char *treebootserial_str = g_match_info_fetch (match, 4);
  *out_entry_bootversion = (int)g_ascii_strtoll (bootversion_str, NULL, 10);
  *out_osname = g_match_info_fetch (match, 2);
  *out_bootcsum = g_match_info_fetch (match, 3);
  *out_treebootserial = (int)g_ascii_strtoll (treebootserial_str, NULL, 10);
  return TRUE;
}
/* Build the runstate path whose existence marks a deployment as
 * unlocked for development (see parse_deployment).  Caller frees. */
static char *
get_unlocked_development_path (OstreeDeployment *deployment)
{
  return g_strdup_printf ("%s%s.%d/%s",
                          _OSTREE_SYSROOT_DEPLOYMENT_RUNSTATE_DIR,
                          ostree_deployment_get_csum (deployment),
                          ostree_deployment_get_deployserial (deployment),
                          _OSTREE_SYSROOT_DEPLOYMENT_RUNSTATE_FLAG_DEVELOPMENT);
}
/* Construct an OstreeDeployment from an ostree= boot link path:
 * resolve the link to the deploy directory, parse CHECKSUM.SERIAL,
 * load the origin keyfile, and detect the unlocked state (runstate
 * flag file for "development", origin key for "hotfix"). */
static gboolean
parse_deployment (OstreeSysroot       *self,
                  const char          *boot_link,
                  OstreeDeployment   **out_deployment,
                  GCancellable        *cancellable,
                  GError             **error)
{
  if (!ensure_sysroot_fd (self, error))
    return FALSE;

  int entry_boot_version;
  g_autofree char *osname = NULL;
  g_autofree char *bootcsum = NULL;
  int treebootserial = -1;
  if (!parse_bootlink (boot_link, &entry_boot_version,
                       &osname, &bootcsum, &treebootserial,
                       error))
    return FALSE;

  const char *relative_boot_link = boot_link;
  if (*relative_boot_link == '/')
    relative_boot_link++;

  g_autofree char *treebootserial_target =
    glnx_readlinkat_malloc (self->sysroot_fd, relative_boot_link,
                            cancellable, error);
  if (!treebootserial_target)
    return FALSE;

  const char *deploy_basename = glnx_basename (treebootserial_target);
  g_autofree char *treecsum = NULL;
  int deployserial = -1;
  if (!_ostree_sysroot_parse_deploy_path_name (deploy_basename,
                                               &treecsum, &deployserial, error))
    return FALSE;

  glnx_fd_close int deployment_dfd = -1;
  if (!glnx_opendirat (self->sysroot_fd, relative_boot_link, TRUE,
                       &deployment_dfd, error))
    return FALSE;

  g_autoptr(GKeyFile) origin = NULL;
  if (!parse_origin (self, deployment_dfd, deploy_basename, &origin,
                     cancellable, error))
    return FALSE;

  g_autoptr(OstreeDeployment) ret_deployment
    = ostree_deployment_new (-1, osname, treecsum, deployserial,
                             bootcsum, treebootserial);
  if (origin)
    ostree_deployment_set_origin (ret_deployment, origin);

  ret_deployment->unlocked = OSTREE_DEPLOYMENT_UNLOCKED_NONE;
  g_autofree char *unlocked_development_path = get_unlocked_development_path (ret_deployment);
  struct stat stbuf;
  /* the transient "development" unlock flag lives in the runstate dir */
  if (lstat (unlocked_development_path, &stbuf) == 0)
    ret_deployment->unlocked = OSTREE_DEPLOYMENT_UNLOCKED_DEVELOPMENT;
  else
    {
      /* the persistent "hotfix" unlock state is recorded in the origin */
      g_autofree char *existing_unlocked_state =
        g_key_file_get_string (origin, "origin", "unlocked", NULL);

      if (g_strcmp0 (existing_unlocked_state, "hotfix") == 0)
        {
          ret_deployment->unlocked = OSTREE_DEPLOYMENT_UNLOCKED_HOTFIX;
        }
      /* TODO: warn on unknown unlock types? */
    }

  g_debug ("Deployment %s.%d unlocked=%d", treecsum, deployserial, ret_deployment->unlocked);

  if (out_deployment)
    *out_deployment = g_steal_pointer (&ret_deployment);
  return TRUE;
}
/* Extract the value of the "ostree=" entry from a boot config's
 * "options" line, or NULL when absent.  Caller owns the result. */
static char *
get_ostree_kernel_arg_from_config (OstreeBootconfigParser  *config)
{
  const char *options = ostree_bootconfig_parser_get (config, "options");
  if (options == NULL)
    return NULL;

  char *found = NULL;
  char **words = g_strsplit (options, " ", -1);
  for (char **it = words; *it != NULL && found == NULL; it++)
    {
      if (g_str_has_prefix (*it, "ostree="))
        found = g_strdup (*it + strlen ("ostree="));
    }
  g_strfreev (words);
  return found;
}
/* Turn one boot loader entry into an OstreeDeployment (via its ostree=
 * kernel argument) and append it to inout_deployments. */
static gboolean
list_deployments_process_one_boot_entry (OstreeSysroot               *self,
                                         OstreeBootconfigParser      *config,
                                         GPtrArray                   *inout_deployments,
                                         GCancellable                *cancellable,
                                         GError                     **error)
{
  g_autofree char *ostree_arg = get_ostree_kernel_arg_from_config (config);
  if (ostree_arg == NULL)
    return glnx_throw (error, "No ostree= kernel argument found");

  g_autoptr(OstreeDeployment) deployment = NULL;
  if (!parse_deployment (self, ostree_arg, &deployment,
                         cancellable, error))
    return FALSE;

  ostree_deployment_set_bootconfig (deployment, config);

  g_ptr_array_add (inout_deployments, g_object_ref (deployment));
  return TRUE;
}
/* GPtrArray sort callback (gconstpointer args are OstreeDeployment**):
 * orders deployments by their bootloader configuration.  The comparison is
 * fully delegated to compare_boot_loader_configs().
 * NOTE(review): the "reversed" in the name is presumably implemented inside
 * compare_boot_loader_configs() (newest entry sorting first) — confirm there,
 * since no negation happens here.
 */
static gint
compare_deployments_by_boot_loader_version_reversed (gconstpointer a_pp,
                                                     gconstpointer b_pp)
{
  OstreeDeployment *a = *((OstreeDeployment**)a_pp);
  OstreeDeployment *b = *((OstreeDeployment**)b_pp);
  OstreeBootconfigParser *a_bootconfig = ostree_deployment_get_bootconfig (a);
  OstreeBootconfigParser *b_bootconfig = ostree_deployment_get_bootconfig (b);
  return compare_boot_loader_configs (a_bootconfig, b_bootconfig);
}
/**
 * ostree_sysroot_load:
 * @self: Sysroot
 * @cancellable: Cancellable
 * @error: Error
 *
 * Load deployment list, bootversion, and subbootversion from the
 * rootfs @self.  This is a convenience wrapper that unconditionally
 * (re)loads; see ostree_sysroot_load_if_changed() for the variant that
 * can skip work when nothing changed on disk.
 *
 * Returns: %TRUE on success, %FALSE with @error set otherwise
 */
gboolean
ostree_sysroot_load (OstreeSysroot *self,
                     GCancellable *cancellable,
                     GError **error)
{
  return ostree_sysroot_load_if_changed (self, NULL, cancellable, error);
}
/* Idempotently open the repository at ostree/repo under the sysroot and cache
 * it in self->repo.  Also marks the repo as sysroot-owned and stores a weak
 * back-reference to @self (used by the remote-add handling), so the repo does
 * not keep the sysroot alive.
 */
static gboolean
ensure_repo (OstreeSysroot *self,
             GError **error)
{
  /* Fast path: already opened on a previous call */
  if (self->repo != NULL)
    return TRUE;
  if (!ensure_sysroot_fd (self, error))
    return FALSE;
  self->repo = ostree_repo_open_at (self->sysroot_fd, "ostree/repo", NULL, error);
  if (!self->repo)
    return FALSE;
  /* Flag it as having been created via ostree_sysroot_get_repo(), and hold a
   * weak ref for the remote-add handling.
   */
  g_weak_ref_init (&self->repo->sysroot, self);
  self->repo->sysroot_kind = OSTREE_REPO_SYSROOT_KIND_VIA_SYSROOT;
  return TRUE;
}
/* Reload the sysroot state (deployment list, boot versions, booted
 * deployment) unless the mtime of ostree/deploy shows nothing changed since
 * the last load.  @out_changed (optional) is set to whether a reload
 * happened.  On the unchanged fast path, cached state is left untouched.
 */
gboolean
ostree_sysroot_load_if_changed (OstreeSysroot *self,
                                gboolean *out_changed,
                                GCancellable *cancellable,
                                GError **error)
{
  if (!ensure_sysroot_fd (self, error))
    return FALSE;
  /* Here we also lazily initialize the repository. We didn't do this
   * previous to v2017.6, but we do now to support the error-free
   * ostree_sysroot_repo() API.
   */
  if (!ensure_repo (self, error))
    return FALSE;
  int bootversion = 0;
  if (!read_current_bootversion (self, &bootversion, cancellable, error))
    return FALSE;
  int subbootversion = 0;
  if (!_ostree_sysroot_read_current_subbootversion (self, bootversion, &subbootversion,
                                                    cancellable, error))
    return FALSE;
  /* The mtime of ostree/deploy is bumped (_ostree_sysroot_bump_mtime) on
   * every deployment change; compare it against the timestamp saved at the
   * previous load as a cheap change-detection signal. */
  struct stat stbuf;
  if (fstatat (self->sysroot_fd, "ostree/deploy", &stbuf, 0) < 0)
    return glnx_throw_errno_prefix (error, "fstatat");
  if (out_changed)
    {
      if (self->loaded_ts.tv_sec == stbuf.st_mtim.tv_sec &&
          self->loaded_ts.tv_nsec == stbuf.st_mtim.tv_nsec)
        {
          *out_changed = FALSE;
          /* Note early return */
          return TRUE;
        }
    }
  /* Drop previously cached state before re-reading from disk */
  g_clear_pointer (&self->deployments, g_ptr_array_unref);
  g_clear_object (&self->booted_deployment);
  self->bootversion = -1;
  self->subbootversion = -1;
  g_autoptr(GPtrArray) boot_loader_configs = NULL;
  if (!_ostree_sysroot_read_boot_loader_configs (self, bootversion, &boot_loader_configs,
                                                 cancellable, error))
    return FALSE;
  g_autoptr(GPtrArray) deployments = g_ptr_array_new_with_free_func ((GDestroyNotify)g_object_unref);
  for (guint i = 0; i < boot_loader_configs->len; i++)
    {
      OstreeBootconfigParser *config = boot_loader_configs->pdata[i];
      if (!list_deployments_process_one_boot_entry (self, config, deployments,
                                                    cancellable, error))
        return FALSE;
    }
  g_ptr_array_sort (deployments, compare_deployments_by_boot_loader_version_reversed);
  /* Indexes must match the sorted order; consumers rely on this */
  for (guint i = 0; i < deployments->len; i++)
    {
      OstreeDeployment *deployment = deployments->pdata[i];
      ostree_deployment_set_index (deployment, i);
    }
  if (!find_booted_deployment (self, deployments, &self->booted_deployment,
                               cancellable, error))
    return FALSE;
  /* Determine whether we're "physical" or not, the first time we initialize */
  if (!self->loaded)
    {
      /* If we have a booted deployment, the sysroot is / and we're definitely
       * not physical.
       */
      if (self->booted_deployment)
        self->is_physical = FALSE; /* (the default, but explicit for clarity) */
      /* Otherwise - check for /sysroot which should only exist in a deployment,
       * not in ${sysroot} (a metavariable for the real physical root).
       */
      else if (fstatat (self->sysroot_fd, "sysroot", &stbuf, 0) < 0)
        {
          /* NB: stbuf is intentionally reused here; its ostree/deploy value
           * was already consumed above. */
          if (errno != ENOENT)
            return glnx_throw_errno_prefix (error, "fstatat");
          self->is_physical = TRUE;
        }
      /* Otherwise, the default is FALSE */
    }
  self->bootversion = bootversion;
  self->subbootversion = subbootversion;
  self->deployments = deployments;
  deployments = NULL; /* Transfer ownership */
  self->loaded = TRUE;
  self->loaded_ts = stbuf.st_mtim;
  if (out_changed)
    *out_changed = TRUE;
  return TRUE;
}
/**
 * ostree_sysroot_get_bootversion:
 * @self: Sysroot
 *
 * Returns: The cached boot version (-1 until a successful load)
 */
int
ostree_sysroot_get_bootversion (OstreeSysroot *self)
{
  return self->bootversion;
}
/**
 * ostree_sysroot_get_subbootversion:
 * @self: Sysroot
 *
 * Returns: The cached sub-boot version (-1 until a successful load)
 */
int
ostree_sysroot_get_subbootversion (OstreeSysroot *self)
{
  return self->subbootversion;
}
/**
 * ostree_sysroot_get_booted_deployment:
 * @self: Sysroot
 *
 * Must be called after a successful ostree_sysroot_load(); otherwise a
 * critical is logged and %NULL is returned.
 *
 * Returns: (transfer none): The currently booted deployment, or %NULL if none
 */
OstreeDeployment *
ostree_sysroot_get_booted_deployment (OstreeSysroot *self)
{
  g_return_val_if_fail (self->loaded, NULL);
  return self->booted_deployment;
}
/**
 * ostree_sysroot_get_deployments:
 * @self: Sysroot
 *
 * The returned container is a fresh array holding a new strong reference on
 * each deployment; mutating it does not affect the sysroot's own list.
 * Requires a prior successful ostree_sysroot_load().
 *
 * Returns: (element-type OstreeDeployment) (transfer container): Ordered list of deployments
 */
GPtrArray *
ostree_sysroot_get_deployments (OstreeSysroot *self)
{
  g_return_val_if_fail (self->loaded, NULL);
  GPtrArray *ret = g_ptr_array_new_with_free_func ((GDestroyNotify)g_object_unref);
  for (guint i = 0; i < self->deployments->len; i++)
    g_ptr_array_add (ret, g_object_ref (self->deployments->pdata[i]));
  return ret;
}
/**
 * ostree_sysroot_get_deployment_dirpath:
 * @self: Sysroot
 * @deployment: A deployment
 *
 * Note this function only returns a *relative* path - if you want
 * to access it, you must either use fd-relative api such as openat(),
 * or concatenate it with the full ostree_sysroot_get_path().
 *
 * Returns: (transfer full): Path to deployment root directory, relative to sysroot
 */
char *
ostree_sysroot_get_deployment_dirpath (OstreeSysroot *self,
                                       OstreeDeployment *deployment)
{
  /* Layout: ostree/deploy/<osname>/deploy/<checksum>.<deployserial> */
  return g_strdup_printf ("ostree/deploy/%s/deploy/%s.%d",
                          ostree_deployment_get_osname (deployment),
                          ostree_deployment_get_csum (deployment),
                          ostree_deployment_get_deployserial (deployment));
}
/**
 * ostree_sysroot_get_deployment_directory:
 * @self: Sysroot
 * @deployment: A deployment
 *
 * Absolute-path variant of ostree_sysroot_get_deployment_dirpath(),
 * resolved against the sysroot's path.
 *
 * Returns: (transfer full): Path to deployment root directory
 */
GFile *
ostree_sysroot_get_deployment_directory (OstreeSysroot *self,
                                         OstreeDeployment *deployment)
{
  g_autofree char *dirpath = ostree_sysroot_get_deployment_dirpath (self, deployment);
  return g_file_resolve_relative_path (self->path, dirpath);
}
/**
 * ostree_sysroot_get_deployment_origin_path:
 * @deployment_path: A deployment path
 *
 * The origin file lives next to the deployment directory, named
 * "<deployment-path>.origin".
 *
 * Returns: (transfer full): Path to deployment origin file
 */
GFile *
ostree_sysroot_get_deployment_origin_path (GFile *deployment_path)
{
  g_autoptr(GFile) deployment_parent = g_file_get_parent (deployment_path);
  return ot_gfile_resolve_path_printf (deployment_parent,
                                       "%s.origin",
                                       gs_file_get_path_cached (deployment_path));
}
/**
 * ostree_sysroot_get_repo:
 * @self: Sysroot
 * @out_repo: (out): Repository in sysroot @self
 * @cancellable: Cancellable
 * @error: Error
 *
 * Retrieve the OSTree repository in sysroot @self.  The repository is
 * opened lazily on first use and cached afterwards.
 *
 * Returns: %TRUE on success, %FALSE with @error set otherwise
 */
gboolean
ostree_sysroot_get_repo (OstreeSysroot *self,
                         OstreeRepo **out_repo,
                         GCancellable *cancellable,
                         GError **error)
{
  if (!ensure_repo (self, error))
    return FALSE;
  /* @cancellable is accepted for API symmetry but unused here */
  if (out_repo != NULL)
    *out_repo = g_object_ref (self->repo);
  return TRUE;
}
/**
 * ostree_sysroot_repo:
 * @self: Sysroot
 *
 * This function is a variant of ostree_sysroot_get_repo() that cannot fail, and
 * returns a cached repository. Can only be called after ostree_sysroot_load()
 * has been invoked successfully.
 *
 * Returns: (transfer none): The OSTree repository in sysroot @self.
 */
OstreeRepo *
ostree_sysroot_repo (OstreeSysroot *self)
{
  g_return_val_if_fail (self->loaded, NULL);
  /* ensure_repo() is guaranteed to have run during load, so this holds */
  g_assert (self->repo);
  return self->repo;
}
/**
 * _ostree_sysroot_query_bootloader:
 * @sysroot: Sysroot
 * @out_bootloader: (out) (transfer full) (allow-none): Return location for bootloader, may be %NULL
 * @cancellable: Cancellable
 * @error: Error
 *
 * Probe for an active bootloader, in order: syslinux, then GRUB 2, then
 * U-Boot.  If none is active, *out_bootloader is set to %NULL (which is
 * still a successful return).
 */
gboolean
_ostree_sysroot_query_bootloader (OstreeSysroot *sysroot,
                                  OstreeBootloader **out_bootloader,
                                  GCancellable *cancellable,
                                  GError **error)
{
  gboolean is_active;
  g_autoptr(OstreeBootloader) ret_loader =
    (OstreeBootloader*)_ostree_bootloader_syslinux_new (sysroot);
  if (!_ostree_bootloader_query (ret_loader, &is_active,
                                 cancellable, error))
    return FALSE;
  if (!is_active)
    {
      /* syslinux inactive; try GRUB 2 next.  The explicit unref replaces the
       * loader held by the g_autoptr variable. */
      g_object_unref (ret_loader);
      ret_loader = (OstreeBootloader*)_ostree_bootloader_grub2_new (sysroot);
      if (!_ostree_bootloader_query (ret_loader, &is_active,
                                     cancellable, error))
        return FALSE;
    }
  if (!is_active)
    {
      /* GRUB 2 inactive; fall back to U-Boot */
      g_object_unref (ret_loader);
      ret_loader = (OstreeBootloader*)_ostree_bootloader_uboot_new (sysroot);
      if (!_ostree_bootloader_query (ret_loader, &is_active, cancellable, error))
        return FALSE;
    }
  if (!is_active)
    g_clear_object (&ret_loader);
  ot_transfer_out_value(out_bootloader, &ret_loader);
  return TRUE;
}
/* Join an array of strings into one newline-terminated buffer, while
 * dropping leading empty lines and collapsing each run of empty lines
 * into a single blank line.  Returns a newly allocated string owned by
 * the caller.
 */
char *
_ostree_sysroot_join_lines (GPtrArray *lines)
{
  GString *buf = g_string_new ("");
  gboolean prev_was_empty = FALSE;
  for (guint i = 0; i < lines->len; i++)
    {
      const char *line = lines->pdata[i];
      /* Special bit to remove extraneous empty lines */
      if (*line == '\0')
        {
          if (prev_was_empty || i == 0)
            continue;
          prev_was_empty = TRUE;
        }
      else
        {
          /* Bug fix: previously prev_was_empty was never reset, so once one
           * blank line had been emitted, every later isolated blank line was
           * silently dropped.  Reset it so only *runs* of blanks collapse. */
          prev_was_empty = FALSE;
        }
      g_string_append (buf, line);
      g_string_append_c (buf, '\n');
    }
  return g_string_free (buf, FALSE);
}
/* Read and parse /proc/cmdline of the *currently running* kernel into an
 * OstreeKernelArgs structure.  The trailing newline is stripped before
 * parsing.  *out_args is owned by the caller on success.
 */
static gboolean
parse_kernel_commandline (OstreeKernelArgs **out_args,
                          GCancellable *cancellable,
                          GError **error)
{
  g_autoptr(GFile) proc_cmdline = g_file_new_for_path ("/proc/cmdline");
  g_autofree char *contents = NULL;
  gsize len;
  if (!g_file_load_contents (proc_cmdline, cancellable, &contents, &len, NULL,
                             error))
    return FALSE;
  /* Drop the trailing newline the kernel appends */
  g_strchomp (contents);
  *out_args = _ostree_kernel_args_from_string (contents);
  return TRUE;
}
/* Determine which deployment (if any) in @deployments is the one we are
 * currently booted into.  This only applies when the sysroot *is* the real
 * root filesystem (same dev/ino as "/"); in that case the deployment whose
 * directory matches "/" by dev/ino is the booted one.  If the kernel was
 * booted with an ostree= argument but no deployment matches, that is an
 * error.  *out_deployment is %NULL for non-ostree systems.
 */
static gboolean
find_booted_deployment (OstreeSysroot       *self,
                        GPtrArray           *deployments,
                        OstreeDeployment   **out_deployment,
                        GCancellable        *cancellable,
                        GError             **error)
{
  struct stat root_stbuf;
  struct stat self_stbuf;
  g_autoptr(OstreeDeployment) ret_deployment = NULL;
  if (stat ("/", &root_stbuf) != 0)
    return glnx_throw_errno_prefix (error, "stat /");
  if (!ensure_sysroot_fd (self, error))
    return FALSE;
  if (fstat (self->sysroot_fd, &self_stbuf) != 0)
    return glnx_throw_errno_prefix (error, "fstat");
  /* Only meaningful when the sysroot fd refers to the real "/" */
  if (root_stbuf.st_dev == self_stbuf.st_dev &&
      root_stbuf.st_ino == self_stbuf.st_ino)
    {
      __attribute__((cleanup(_ostree_kernel_args_cleanup))) OstreeKernelArgs *kernel_args = NULL;
      if (!parse_kernel_commandline (&kernel_args, cancellable, error))
        return FALSE;
      /* Last ostree= wins if it was specified multiple times */
      const char *bootlink_arg = _ostree_kernel_args_get_last_value (kernel_args, "ostree");
      if (bootlink_arg)
        {
          for (guint i = 0; i < deployments->len; i++)
            {
              OstreeDeployment *deployment = deployments->pdata[i];
              g_autofree char *deployment_path = ostree_sysroot_get_deployment_dirpath (self, deployment);
              struct stat stbuf;
              if (fstatat (self->sysroot_fd, deployment_path, &stbuf, 0) != 0)
                return glnx_throw_errno_prefix (error, "fstatat");
              /* dev/ino match means "/" is this deployment's root */
              if (stbuf.st_dev == root_stbuf.st_dev &&
                  stbuf.st_ino == root_stbuf.st_ino)
                {
                  ret_deployment = g_object_ref (deployment);
                  break;
                }
            }
          if (ret_deployment == NULL)
            return glnx_throw (error, "Unexpected state: ostree= kernel argument found, but / is not a deployment root");
        }
      else
        {
          /* Not an ostree system */
        }
    }
  ot_transfer_out_value (out_deployment, &ret_deployment);
  return TRUE;
}
/**
 * ostree_sysroot_query_deployments_for:
 * @self: Sysroot
 * @osname: (allow-none): "stateroot" name
 * @out_pending: (out) (allow-none) (transfer full): The pending deployment
 * @out_rollback: (out) (allow-none) (transfer full): The rollback deployment
 *
 * Find the pending and rollback deployments for @osname. Pass %NULL for @osname
 * to use the booted deployment's osname. By default, pending deployment is the
 * first deployment in the order that matches @osname, and @rollback will be the
 * next one after the booted deployment, or the deployment after the pending if
 * we're not looking at the booted deployment.
 *
 * Either output may be %NULL if no matching deployment exists.
 *
 * Since: 2017.7
 */
void
ostree_sysroot_query_deployments_for (OstreeSysroot     *self,
                                      const char        *osname,
                                      OstreeDeployment  **out_pending,
                                      OstreeDeployment  **out_rollback)
{
  g_return_if_fail (osname != NULL || self->booted_deployment != NULL);
  g_autoptr(OstreeDeployment) ret_pending = NULL;
  g_autoptr(OstreeDeployment) ret_rollback = NULL;
  if (osname == NULL)
    osname = ostree_deployment_get_osname (self->booted_deployment);
  gboolean found_booted = FALSE;
  for (guint i = 0; i < self->deployments->len; i++)
    {
      OstreeDeployment *deployment = self->deployments->pdata[i];
      /* Is this deployment booted?  If so, note we're past the booted */
      if (self->booted_deployment != NULL &&
          ostree_deployment_equal (deployment, self->booted_deployment))
        {
          found_booted = TRUE;
          continue;
        }
      /* Ignore deployments not for this osname */
      if (strcmp (ostree_deployment_get_osname (deployment), osname) != 0)
        continue;
      /* First match before the booted entry is pending; first match after
       * it is the rollback target */
      if (!found_booted && !ret_pending)
        ret_pending = g_object_ref (deployment);
      else if (found_booted && !ret_rollback)
        ret_rollback = g_object_ref (deployment);
    }
  if (out_pending)
    *out_pending = g_steal_pointer (&ret_pending);
  if (out_rollback)
    *out_rollback = g_steal_pointer (&ret_rollback);
}
/**
 * ostree_sysroot_get_merge_deployment:
 * @self: Sysroot
 * @osname: (allow-none): Operating system group
 *
 * Find the deployment to use as a configuration merge source; this is
 * the first one in the current deployment list which matches osname.
 *
 * Returns: (transfer full): Configuration merge deployment, or %NULL if none matches
 */
OstreeDeployment *
ostree_sysroot_get_merge_deployment (OstreeSysroot     *self,
                                     const char        *osname)
{
  g_return_val_if_fail (osname != NULL || self->booted_deployment != NULL, NULL);
  if (osname == NULL)
    osname = ostree_deployment_get_osname (self->booted_deployment);
  /* If we're booted into the OS into which we're deploying, then
   * merge the currently *booted* configuration, rather than the most
   * recently deployed.
   */
  if (self->booted_deployment &&
      g_strcmp0 (ostree_deployment_get_osname (self->booted_deployment), osname) == 0)
    return g_object_ref (self->booted_deployment);
  else
    {
      /* Otherwise the pending deployment (first for @osname) is the merge source */
      g_autoptr(OstreeDeployment) pending = NULL;
      ostree_sysroot_query_deployments_for (self, osname, &pending, NULL);
      return g_steal_pointer (&pending);
    }
}
/**
 * ostree_sysroot_origin_new_from_refspec:
 * @self: Sysroot (currently unused; kept for API symmetry)
 * @refspec: A refspec
 *
 * Returns: (transfer full): A new config file which sets @refspec as an origin
 */
GKeyFile *
ostree_sysroot_origin_new_from_refspec (OstreeSysroot  *self,
                                        const char     *refspec)
{
  GKeyFile *ret = g_key_file_new ();
  g_key_file_set_string (ret, "origin", "refspec", refspec);
  return ret;
}
/**
 * ostree_sysroot_lock:
 * @self: Self
 * @error: Error
 *
 * Acquire an exclusive multi-process write lock for @self. This call
 * blocks until the lock has been acquired. The lock is not
 * reentrant.
 *
 * Release the lock with ostree_sysroot_unlock(). The lock will also
 * be released if @self is deallocated.
 *
 * Returns: %TRUE on success, %FALSE with @error set otherwise
 */
gboolean
ostree_sysroot_lock (OstreeSysroot     *self,
                     GError           **error)
{
  if (!ensure_sysroot_fd (self, error))
    return FALSE;
  /* Blocking flock()-style lock on the well-known lockfile path */
  return glnx_make_lock_file (self->sysroot_fd, OSTREE_SYSROOT_LOCKFILE,
                              LOCK_EX, &self->lock, error);
}
/**
 * ostree_sysroot_try_lock:
 * @self: Self
 * @out_acquired: (out): Whether or not the lock has been acquired
 * @error: Error
 *
 * Try to acquire an exclusive multi-process write lock for @self. If
 * another process holds the lock, this function will return
 * immediately, setting @out_acquired to %FALSE, and returning %TRUE
 * (and no error).
 *
 * @out_acquired must not be %NULL; it is written unconditionally on
 * every successful return.
 *
 * Release the lock with ostree_sysroot_unlock(). The lock will also
 * be released if @self is deallocated.
 */
gboolean
ostree_sysroot_try_lock (OstreeSysroot         *self,
                         gboolean              *out_acquired,
                         GError               **error)
{
  g_autoptr(GError) local_error = NULL;
  if (!ensure_sysroot_fd (self, error))
    return FALSE;
  /* Note use of LOCK_NB */
  if (!glnx_make_lock_file (self->sysroot_fd, OSTREE_SYSROOT_LOCKFILE,
                            LOCK_EX | LOCK_NB, &self->lock, &local_error))
    {
      if (g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK))
        {
          /* Contended, not an error: report "not acquired" and succeed */
          *out_acquired = FALSE;
        }
      else
        {
          g_propagate_error (error, g_steal_pointer (&local_error));
          return FALSE;
        }
    }
  else
    {
      *out_acquired = TRUE;
    }
  return TRUE;
}
/**
 * ostree_sysroot_unlock:
 * @self: Self
 *
 * Clear the lock previously acquired with ostree_sysroot_lock(). It
 * is safe to call this function if the lock has not been previously
 * acquired.
 */
void
ostree_sysroot_unlock (OstreeSysroot  *self)
{
  /* glnx_release_lock_file() is a no-op on an unheld lock */
  glnx_release_lock_file (&self->lock);
}
/* GTask thread function backing ostree_sysroot_lock_async(): take the
 * blocking lock, then if cancellation raced with acquisition, release it
 * again so the caller is not left holding a lock it believes failed.
 */
static void
lock_in_thread (GTask            *task,
                gpointer          source,
                gpointer          task_data,
                GCancellable     *cancellable)
{
  GError *local_error = NULL;
  OstreeSysroot *self = source;

  if (!ostree_sysroot_lock (self, &local_error))
    goto out;

  if (g_cancellable_set_error_if_cancelled (cancellable, &local_error))
    ostree_sysroot_unlock (self);

 out:
  /* Ownership of local_error transfers to the task on error */
  if (local_error)
    g_task_return_error (task, local_error);
  else
    g_task_return_boolean (task, TRUE);
}
/**
 * ostree_sysroot_lock_async:
 * @self: Self
 * @cancellable: Cancellable
 * @callback: Callback
 * @user_data: User data
 *
 * An asynchronous version of ostree_sysroot_lock().  The blocking
 * acquisition runs in a worker thread; complete with
 * ostree_sysroot_lock_finish() from @callback.
 */
void
ostree_sysroot_lock_async (OstreeSysroot         *self,
                           GCancellable          *cancellable,
                           GAsyncReadyCallback    callback,
                           gpointer               user_data)
{
  g_autoptr(GTask) task = g_task_new (self, cancellable, callback, user_data);
  g_task_run_in_thread (task, lock_in_thread);
}
/**
 * ostree_sysroot_lock_finish:
 * @self: Self
 * @result: Result
 * @error: Error
 *
 * Call when ostree_sysroot_lock_async() is ready.
 *
 * Returns: %TRUE if the lock was acquired, %FALSE with @error set otherwise
 */
gboolean
ostree_sysroot_lock_finish (OstreeSysroot         *self,
                            GAsyncResult          *result,
                            GError               **error)
{
  g_return_val_if_fail (g_task_is_valid (result, self), FALSE);
  return g_task_propagate_boolean ((GTask*)result, error);
}
/**
 * ostree_sysroot_init_osname:
 * @self: Sysroot
 * @osname: Name group of operating system checkouts
 * @cancellable: Cancellable
 * @error: Error
 *
 * Initialize the directory structure for an "osname", which is a
 * group of operating system deployments, with a shared `/var`. One
 * is required for generating a deployment.
 *
 * Creates ostree/deploy/<osname> with a skeleton var/ (tmp with the
 * sticky bit, lib, log, and the run/lock symlinks into /run), then
 * bumps the sysroot mtime so watchers notice the change.
 */
gboolean
ostree_sysroot_init_osname (OstreeSysroot       *self,
                            const char          *osname,
                            GCancellable        *cancellable,
                            GError             **error)
{
  if (!ensure_sysroot_fd (self, error))
    return FALSE;

  const char *deploydir = glnx_strjoina ("ostree/deploy/", osname);
  if (mkdirat (self->sysroot_fd, deploydir, 0777) < 0)
    return glnx_throw_errno_prefix (error, "Creating %s", deploydir);

  glnx_fd_close int dfd = -1;
  if (!glnx_opendirat (self->sysroot_fd, deploydir, TRUE, &dfd, error))
    return FALSE;

  if (mkdirat (dfd, "var", 0777) < 0)
    return glnx_throw_errno_prefix (error, "Creating %s", "var");

  /* This is a bit of a legacy hack...but we have to keep it around
   * now. We're ensuring core subdirectories of /var exist.
   */
  if (mkdirat (dfd, "var/tmp", 0777) < 0)
    return glnx_throw_errno_prefix (error, "Creating %s", "var/tmp");
  /* var/tmp needs the sticky bit like /tmp */
  if (fchmodat (dfd, "var/tmp", 01777, 0) < 0)
    return glnx_throw_errno_prefix (error, "fchmod %s", "var/tmp");
  /* Bug fix: this error message previously said "var/tmp" (copy-paste) */
  if (mkdirat (dfd, "var/lib", 0777) < 0)
    return glnx_throw_errno_prefix (error, "Creating %s", "var/lib");

  /* This needs to be available and properly labeled early during the boot
   * process (before tmpfiles.d kicks in), so that journald can flush logs from
   * the first boot there. https://bugzilla.redhat.com/show_bug.cgi?id=1265295
   * */
  if (mkdirat (dfd, "var/log", 0755) < 0)
    return glnx_throw_errno_prefix (error, "Creating %s", "var/log");

  if (symlinkat ("../run", dfd, "var/run") < 0)
    return glnx_throw_errno_prefix (error, "Symlinking %s", "var/run");
  if (symlinkat ("../run/lock", dfd, "var/lock") < 0)
    return glnx_throw_errno_prefix (error, "Symlinking %s", "var/lock");

  if (!_ostree_sysroot_bump_mtime (self, error))
    return FALSE;
  return TRUE;
}
/**
 * ostree_sysroot_simple_write_deployment:
 * @sysroot: Sysroot
 * @osname: (allow-none): OS name
 * @new_deployment: Prepend this deployment to the list
 * @merge_deployment: (allow-none): Use this deployment for configuration merge
 * @flags: Flags controlling behavior
 * @cancellable: Cancellable
 * @error: Error
 *
 * Prepend @new_deployment to the list of deployments, commit, and
 * cleanup. By default, all other deployments for the given @osname
 * except the merge deployment and the booted deployment will be
 * garbage collected.
 *
 * If %OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_RETAIN is
 * specified, then all current deployments will be kept.
 *
 * If %OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NOT_DEFAULT is
 * specified, then instead of prepending, the new deployment will be
 * added right after the booted or merge deployment, instead of first.
 *
 * If %OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NO_CLEAN is
 * specified, then no cleanup will be performed after adding the
 * deployment. Make sure to call ostree_sysroot_cleanup() sometime
 * later, instead.
 */
gboolean
ostree_sysroot_simple_write_deployment (OstreeSysroot      *sysroot,
                                        const char         *osname,
                                        OstreeDeployment   *new_deployment,
                                        OstreeDeployment   *merge_deployment,
                                        OstreeSysrootSimpleWriteDeploymentFlags flags,
                                        GCancellable       *cancellable,
                                        GError            **error)
{
  const gboolean postclean = (flags & OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NO_CLEAN) == 0;
  OstreeSysrootWriteDeploymentsOpts write_opts = { .do_postclean = postclean };
  const gboolean retain = (flags & OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_RETAIN) > 0;
  const gboolean make_default = !((flags & OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NOT_DEFAULT) > 0);
  gboolean added_new = FALSE;

  g_autoptr(GPtrArray) new_deployments = g_ptr_array_new_with_free_func (g_object_unref);
  g_autoptr(GPtrArray) deployments = ostree_sysroot_get_deployments (sysroot);
  OstreeDeployment *booted_deployment = ostree_sysroot_get_booted_deployment (sysroot);
  if (osname == NULL && booted_deployment)
    osname = ostree_deployment_get_osname (booted_deployment);

  if (make_default)
    {
      g_ptr_array_add (new_deployments, g_object_ref (new_deployment));
      added_new = TRUE;
    }

  for (guint i = 0; i < deployments->len; i++)
    {
      OstreeDeployment *deployment = deployments->pdata[i];
      const gboolean is_merge_or_booted =
        ostree_deployment_equal (deployment, booted_deployment) ||
        ostree_deployment_equal (deployment, merge_deployment);
      /* Keep deployments with different osnames, as well as the
       * booted and merge deployments
       */
      if (retain ||
          (osname != NULL && strcmp (ostree_deployment_get_osname (deployment), osname) != 0) ||
          is_merge_or_booted)
        g_ptr_array_add (new_deployments, g_object_ref (deployment));

      /* Non-default case: insert the new deployment after processing the
       * first existing deployment.  NOTE(review): if the merge deployment is
       * immediately followed by the booted deployment, this still inserts
       * *between* them; an improvement would be to insert right after the
       * current default — confirm desired ordering with callers. */
      if (!added_new)
        {
          g_ptr_array_add (new_deployments, g_object_ref (new_deployment));
          added_new = TRUE;
        }
    }

  /* Degenerate case: the deployment list was empty */
  if (!added_new)
    {
      g_ptr_array_add (new_deployments, g_object_ref (new_deployment));
      added_new = TRUE;
    }

  /* Commit the new list (and clean up unless NO_CLEAN was requested) */
  return ostree_sysroot_write_deployments_with_options (sysroot, new_deployments, &write_opts,
                                                        cancellable, error);
}
/* Deploy a copy of @target_deployment, reusing the merge deployment's kernel
 * arguments, and push it as a *rollback* target (NOT_DEFAULT) — used by the
 * hotfix unlock path so the pristine tree survives as a fallback.
 */
static gboolean
clone_deployment (OstreeSysroot     *sysroot,
                  OstreeDeployment  *target_deployment,
                  OstreeDeployment  *merge_deployment,
                  GCancellable      *cancellable,
                  GError           **error)
{
  /* Ensure we have a clean slate */
  if (!ostree_sysroot_prepare_cleanup (sysroot, cancellable, error))
    return glnx_prefix_error (error, "Performing initial cleanup");

  /* Copy the bootloader config options.
   * NOTE(review): if the merge deployment's bootconfig has no "options"
   * entry this passes NULL to g_strsplit() — presumably it is always set
   * for deployed entries; confirm. */
  OstreeBootconfigParser *bootconfig = ostree_deployment_get_bootconfig (merge_deployment);
  g_auto(GStrv) previous_args = g_strsplit (ostree_bootconfig_parser_get (bootconfig, "options"), " ", -1);
  __attribute__((cleanup(_ostree_kernel_args_cleanup))) OstreeKernelArgs *kargs = _ostree_kernel_args_new ();
  _ostree_kernel_args_append_argv (kargs, previous_args);

  /* Deploy the copy */
  g_autoptr(OstreeDeployment) new_deployment = NULL;
  g_auto(GStrv) kargs_strv = _ostree_kernel_args_to_strv (kargs);
  if (!ostree_sysroot_deploy_tree (sysroot,
                                   ostree_deployment_get_osname (target_deployment),
                                   ostree_deployment_get_csum (target_deployment),
                                   ostree_deployment_get_origin (target_deployment),
                                   merge_deployment, kargs_strv, &new_deployment,
                                   cancellable, error))
    return FALSE;

  /* Hotfixes push the deployment as rollback target, so it shouldn't
   * be the default.
   */
  if (!ostree_sysroot_simple_write_deployment (sysroot, ostree_deployment_get_osname (target_deployment),
                                               new_deployment, merge_deployment,
                                               OSTREE_SYSROOT_SIMPLE_WRITE_DEPLOYMENT_FLAGS_NOT_DEFAULT,
                                               cancellable, error))
    return FALSE;
  return TRUE;
}
/**
 * ostree_sysroot_deployment_unlock:
 * @self: Sysroot
 * @deployment: Deployment
 * @unlocked_state: Transition to this unlocked state
 * @cancellable: Cancellable
 * @error: Error
 *
 * Configure the target deployment @deployment such that it
 * is writable. There are multiple modes, essentially differing
 * in whether or not any changes persist across reboot.
 *
 * The `OSTREE_DEPLOYMENT_UNLOCKED_HOTFIX` state is persistent
 * across reboots.
 *
 * Implementation outline: (1) for hotfix, first clone the deployment as a
 * rollback target; (2) make the deployment mutable; (3) mount an overlayfs
 * over /usr (hotfix keeps the upper/work dirs inside the deployment so they
 * persist; development puts them in /var/tmp so they vanish on reboot);
 * (4) record the state (origin file for hotfix, a marker file for
 * development); (5) bump the sysroot mtime.
 */
gboolean
ostree_sysroot_deployment_unlock (OstreeSysroot     *self,
                                  OstreeDeployment  *deployment,
                                  OstreeDeploymentUnlockedState unlocked_state,
                                  GCancellable      *cancellable,
                                  GError           **error)
{
  /* This function cannot re-lock */
  g_return_val_if_fail (unlocked_state != OSTREE_DEPLOYMENT_UNLOCKED_NONE, FALSE);

  OstreeDeploymentUnlockedState current_unlocked = ostree_deployment_get_unlocked (deployment);
  if (current_unlocked != OSTREE_DEPLOYMENT_UNLOCKED_NONE)
    return glnx_throw (error, "Deployment is already in unlocked state: %s",
                       ostree_deployment_unlocked_state_to_string (current_unlocked));

  g_autoptr(OstreeDeployment) merge_deployment =
    ostree_sysroot_get_merge_deployment (self, ostree_deployment_get_osname (deployment));
  if (!merge_deployment)
    return glnx_throw (error, "No previous deployment to duplicate");

  /* For hotfixes, we push a rollback target */
  if (unlocked_state == OSTREE_DEPLOYMENT_UNLOCKED_HOTFIX)
    {
      if (!clone_deployment (self, deployment, merge_deployment, cancellable, error))
        return FALSE;
    }

  /* Crack it open */
  if (!ostree_sysroot_deployment_set_mutable (self, deployment, TRUE,
                                              cancellable, error))
    return FALSE;

  g_autofree char *deployment_path = ostree_sysroot_get_deployment_dirpath (self, deployment);
  glnx_fd_close int deployment_dfd = -1;
  if (!glnx_opendirat (self->sysroot_fd, deployment_path, TRUE, &deployment_dfd, error))
    return FALSE;

  g_autoptr(OstreeSePolicy) sepolicy = ostree_sepolicy_new_at (deployment_dfd, cancellable, error);
  if (!sepolicy)
    return FALSE;

  /* Built below per-mode; asserted non-NULL before the mount */
  const char *ovl_options = NULL;
  switch (unlocked_state)
    {
    case OSTREE_DEPLOYMENT_UNLOCKED_NONE:
      g_assert_not_reached ();
      break;
    case OSTREE_DEPLOYMENT_UNLOCKED_HOTFIX:
      {
        const char hotfix_ovl_options[] = "lowerdir=usr,upperdir=.usr-ovl-upper,workdir=.usr-ovl-work";
        /* Create the overlayfs directories in the deployment root
         * directly for hotfixes. The ostree-prepare-root.c helper
         * is also set up to detect and mount these.
         */
        if (!glnx_shutil_mkdir_p_at (deployment_dfd, ".usr-ovl-upper", 0755, cancellable, error))
          return FALSE;
        if (!glnx_shutil_mkdir_p_at (deployment_dfd, ".usr-ovl-work", 0755, cancellable, error))
          return FALSE;
        ovl_options = hotfix_ovl_options;
      }
      break;
    case OSTREE_DEPLOYMENT_UNLOCKED_DEVELOPMENT:
      {
        /* We're just doing transient development/hacking? Okay,
         * stick the overlayfs bits in /var/tmp.
         */
        char *development_ovldir = strdupa ("/var/tmp/ostree-unlock-ovl.XXXXXX");
        const char *development_ovl_upper;
        const char *development_ovl_work;
        /* Ensure that the directory is created with the same label as `/usr` */
        { g_auto(OstreeSepolicyFsCreatecon) con = { 0, };
          if (!_ostree_sepolicy_preparefscreatecon (&con, sepolicy,
                                                    "/usr", 0755, error))
            return FALSE;
          if (!glnx_mkdtempat (AT_FDCWD, development_ovldir, 0755, error))
            return FALSE;
        }
        development_ovl_upper = glnx_strjoina (development_ovldir, "/upper");
        if (!glnx_shutil_mkdir_p_at (AT_FDCWD, development_ovl_upper, 0755, cancellable, error))
          return FALSE;
        development_ovl_work = glnx_strjoina (development_ovldir, "/work");
        if (!glnx_shutil_mkdir_p_at (AT_FDCWD, development_ovl_work, 0755, cancellable, error))
          return FALSE;
        ovl_options = glnx_strjoina ("lowerdir=usr,upperdir=", development_ovl_upper,
                                     ",workdir=", development_ovl_work);
      }
    }
  g_assert (ovl_options != NULL);

  /* Here we run `mount()` in a fork()ed child because we need to use
   * `chdir()` in order to have the mount path options to overlayfs not
   * look ugly.
   *
   * We can't `chdir()` inside a shared library since there may be
   * threads, etc.
   */
  {
    pid_t mount_child = fork ();
    if (mount_child < 0)
      return glnx_throw_errno_prefix (error, "fork");
    else if (mount_child == 0)
      {
        /* Child process. Do NOT use any GLib API here. */
        if (fchdir (deployment_dfd) < 0)
          exit (EXIT_FAILURE);
        if (mount ("overlay", "/usr", "overlay", 0, ovl_options) < 0)
          exit (EXIT_FAILURE);
        exit (EXIT_SUCCESS);
      }
    else
      {
        /* Parent */
        int estatus;
        if (TEMP_FAILURE_RETRY (waitpid (mount_child, &estatus, 0)) < 0)
          return glnx_throw_errno_prefix (error, "waitpid() on mount helper");
        if (!g_spawn_check_exit_status (estatus, error))
          return glnx_throw_errno_prefix (error, "overlayfs mount helper");
      }
  }

  g_autoptr(OstreeDeployment) deployment_clone = ostree_deployment_clone (deployment);
  GKeyFile *origin_clone = ostree_deployment_get_origin (deployment_clone);

  /* Now, write out the flag saying what we did */
  switch (unlocked_state)
    {
    case OSTREE_DEPLOYMENT_UNLOCKED_NONE:
      g_assert_not_reached ();
      break;
    case OSTREE_DEPLOYMENT_UNLOCKED_HOTFIX:
      /* Hotfix state persists via the origin file */
      g_key_file_set_string (origin_clone, "origin", "unlocked",
                             ostree_deployment_unlocked_state_to_string (unlocked_state));
      if (!ostree_sysroot_write_origin_file (self, deployment, origin_clone,
                                             cancellable, error))
        return FALSE;
      break;
    case OSTREE_DEPLOYMENT_UNLOCKED_DEVELOPMENT:
      {
        /* Development state is recorded as an empty marker file only */
        g_autofree char *devpath = get_unlocked_development_path (deployment);
        g_autofree char *devpath_parent = dirname (g_strdup (devpath));
        if (!glnx_shutil_mkdir_p_at (AT_FDCWD, devpath_parent, 0755, cancellable, error))
          return FALSE;
        if (!g_file_set_contents (devpath, "", 0, error))
          return FALSE;
      }
    }

  /* For hotfixes we already pushed a rollback which will bump the
   * mtime, but we need to bump it again so that clients get the state
   * change for this deployment. For development we need to do this
   * regardless.
   */
  if (!_ostree_sysroot_bump_mtime (self, error))
    return FALSE;
  return TRUE;
}
| 1 | 11,873 | But if we're on the merge deployment, and the next one is the booted deployment, we'll still be inserting between the merge and booted deployment, right? It seems like we would need e.g. `met_merge` and `met_booted` vars to keep track. | ostreedev-ostree | c |
@@ -1,3 +1,7 @@
-return !axe.commons.aria.isValidRole(node.getAttribute('role'), {
- allowAbstract: true
-});
+return (
+ axe.utils.tokenList(virtualNode.attr('role')).filter(role => {
+ return !axe.commons.aria.isValidRole(role, {
+ allowAbstract: true
+ });
+ }).length > 0
+); | 1 | return !axe.commons.aria.isValidRole(node.getAttribute('role'), {
allowAbstract: true
});
| 1 | 15,258 | Can you put the output of this into data, and list which roles are invalid? Same thing with abstract role. I know that's not strictly part of this PR, but it'd make the messaging about this a bunch better. | dequelabs-axe-core | js |
@@ -20,15 +20,15 @@ import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.tests.acceptance.dsl.WaitUtils;
import org.hyperledger.besu.tests.acceptance.dsl.condition.Condition;
import org.hyperledger.besu.tests.acceptance.dsl.node.Node;
-import org.hyperledger.besu.tests.acceptance.dsl.transaction.ibft2.Ibft2Transactions;
+import org.hyperledger.besu.tests.acceptance.dsl.transaction.bft.BftTransactions;
import java.util.Map;
public class ExpectProposals implements Condition {
- private final Ibft2Transactions ibftTwo;
+ private final BftTransactions ibftTwo;
private final Map<Address, Boolean> proposers;
- public ExpectProposals(final Ibft2Transactions ibftTwo, final Map<Address, Boolean> proposers) {
+ public ExpectProposals(final BftTransactions ibftTwo, final Map<Address, Boolean> proposers) {
this.ibftTwo = ibftTwo;
this.proposers = proposers;
} | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.tests.acceptance.dsl.condition.ibft2;
import static org.assertj.core.api.Assertions.assertThat;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.tests.acceptance.dsl.WaitUtils;
import org.hyperledger.besu.tests.acceptance.dsl.condition.Condition;
import org.hyperledger.besu.tests.acceptance.dsl.node.Node;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.ibft2.Ibft2Transactions;
import java.util.Map;
public class ExpectProposals implements Condition {
private final Ibft2Transactions ibftTwo;
private final Map<Address, Boolean> proposers;
public ExpectProposals(final Ibft2Transactions ibftTwo, final Map<Address, Boolean> proposers) {
this.ibftTwo = ibftTwo;
this.proposers = proposers;
}
@Override
public void verify(final Node node) {
WaitUtils.waitFor(
() -> assertThat(node.execute(ibftTwo.createProposals())).isEqualTo(proposers));
}
}
| 1 | 24,480 | rename ibftTwo arg to bft | hyperledger-besu | java |
@@ -38,7 +38,7 @@ module Blacklight::SearchContext
end
def find_search_session
- if agent_is_crawler?
+ if agent_is_crawler? || skip_session_tracking?
nil
elsif params[:search_context].present?
find_or_initialize_search_session_from_params JSON.parse(params[:search_context]) | 1 | # frozen_string_literal: true
module Blacklight::SearchContext
extend ActiveSupport::Concern
# The following code is executed when someone includes Blacklight::Catalog::SearchSession in their
# own controller.
included do
if respond_to? :helper_method
helper_method :current_search_session, :search_session
end
end
module ClassMethods
# Save the submitted search parameters in the search session
def record_search_parameters opts = { only: :index }
before_action :set_current_search_session, opts
end
end
private
# sets up the session[:search] hash if it doesn't already exist
def search_session
session[:search] ||= {}
# Need to call the getter again. The value is mutated
# https://github.com/rails/rails/issues/23884
session[:search]
end
# The current search session
def current_search_session
@current_search_session ||= find_search_session
end
# Persist the current search session id to the user's session
def set_current_search_session
search_session['id'] = current_search_session.id if current_search_session
end
def find_search_session
if agent_is_crawler?
nil
elsif params[:search_context].present?
find_or_initialize_search_session_from_params JSON.parse(params[:search_context])
elsif params[:search_id].present?
begin
# TODO: check the search id signature.
searches_from_history.find(params[:search_id])
rescue ActiveRecord::RecordNotFound
nil
end
elsif start_new_search_session?
find_or_initialize_search_session_from_params search_state.to_h
elsif search_session['id']
begin
searches_from_history.find(search_session['id'])
rescue ActiveRecord::RecordNotFound
nil
end
end
end
##
# If the current action should start a new search session, this should be
# set to true
def start_new_search_session?
false
end
##
# Determine if the current request is coming from an anonymous bot
# or search crawler
#
def agent_is_crawler?
crawler_proc = blacklight_config.crawler_detector
return false if crawler_proc.nil? || current_user.present?
crawler_proc.call(request)
end
def find_or_initialize_search_session_from_params params
params_copy = params.reject { |k, v| blacklisted_search_session_params.include?(k.to_sym) || v.blank? }
return if params_copy.reject { |k, _v| [:action, :controller].include? k.to_sym }.blank?
saved_search = searches_from_history.find { |x| x.query_params == params_copy }
saved_search || Search.create(query_params: params_copy).tap do |s|
add_to_search_history(s)
end
end
# Add a search to the in-session search history list
def add_to_search_history search
session[:history] ||= []
session[:history].unshift(search.id)
if session[:history].length > blacklight_config.search_history_window
session[:history] = session[:history].slice(0, blacklight_config.search_history_window)
end
end
# A list of query parameters that should not be persisted for a search
def blacklisted_search_session_params
[:commit, :counter, :total, :search_id, :page, :per_page]
end
# calls setup_previous_document then setup_next_document.
# used in the show action for single view pagination.
def setup_next_and_previous_documents
if search_session['counter'] && current_search_session
index = search_session['counter'].to_i - 1
response, documents = search_service.previous_and_next_documents_for_search index, search_state.reset(current_search_session.query_params).to_hash
search_session['total'] = response.total
{ prev: documents.first, next: documents.last }
end
rescue Blacklight::Exceptions::InvalidRequest => e
logger.warn "Unable to setup next and previous documents: #{e}"
nil
end
end
| 1 | 8,427 | Should we go ahead and push `agent_is_crawler?` into the `skip_session_tracking?` method? | projectblacklight-blacklight | rb |
@@ -88,8 +88,11 @@ class CustomDataset(Dataset):
self.proposal_file = osp.join(self.data_root,
self.proposal_file)
# load annotations (and proposals)
- with self.file_client.get_local_path(self.ann_file) as local_path:
- self.data_infos = self.load_annotations(local_path)
+ try:
+ with self.file_client.get_local_path(self.ann_file) as local_path:
+ self.data_infos = self.load_annotations(local_path)
+ except AttributeError:
+ raise AttributeError('Please upgrade mmcv to >= 1.3.16')
if self.proposal_file is not None:
with self.file_client.get_local_path( | 1 | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from torch.utils.data import Dataset
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .pipelines import Compose
@DATASETS.register_module()
class CustomDataset(Dataset):
"""Custom dataset for detection.
The annotation format is shown as follows. The `ann` field is optional for
testing.
.. code-block:: none
[
{
'filename': 'a.jpg',
'width': 1280,
'height': 720,
'ann': {
'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
'labels': <np.ndarray> (n, ),
'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
'labels_ignore': <np.ndarray> (k, 4) (optional field)
}
},
...
]
Args:
ann_file (str): Annotation file path.
pipeline (list[dict]): Processing pipeline.
classes (str | Sequence[str], optional): Specify classes to load.
If is None, ``cls.CLASSES`` will be used. Default: None.
data_root (str, optional): Data root for ``ann_file``,
``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
test_mode (bool, optional): If set True, annotation will not be loaded.
filter_empty_gt (bool, optional): If set true, images without bounding
boxes of the dataset's classes will be filtered out. This option
only works when `test_mode=False`, i.e., we never filter images
during tests.
"""
CLASSES = None
def __init__(self,
ann_file,
pipeline,
classes=None,
data_root=None,
img_prefix='',
seg_prefix=None,
proposal_file=None,
test_mode=False,
filter_empty_gt=True,
file_client_args=dict(backend='disk')):
self.ann_file = ann_file
self.data_root = data_root
self.img_prefix = img_prefix
self.seg_prefix = seg_prefix
self.proposal_file = proposal_file
self.test_mode = test_mode
self.filter_empty_gt = filter_empty_gt
self.CLASSES = self.get_classes(classes)
self.file_client = mmcv.FileClient(**file_client_args)
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.ann_file):
self.ann_file = osp.join(self.data_root, self.ann_file)
if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
self.img_prefix = osp.join(self.data_root, self.img_prefix)
if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
if not (self.proposal_file is None
or osp.isabs(self.proposal_file)):
self.proposal_file = osp.join(self.data_root,
self.proposal_file)
# load annotations (and proposals)
with self.file_client.get_local_path(self.ann_file) as local_path:
self.data_infos = self.load_annotations(local_path)
if self.proposal_file is not None:
with self.file_client.get_local_path(
self.proposal_file) as local_path:
self.proposals = self.load_proposals(local_path)
else:
self.proposals = None
# filter images too small and containing no annotations
if not test_mode:
valid_inds = self._filter_imgs()
self.data_infos = [self.data_infos[i] for i in valid_inds]
if self.proposals is not None:
self.proposals = [self.proposals[i] for i in valid_inds]
# set group flag for the sampler
self._set_group_flag()
# processing pipeline
self.pipeline = Compose(pipeline)
def __len__(self):
"""Total number of samples of data."""
return len(self.data_infos)
def load_annotations(self, ann_file):
"""Load annotation from annotation file."""
return mmcv.load(ann_file)
def load_proposals(self, proposal_file):
"""Load proposal from proposal file."""
return mmcv.load(proposal_file)
def get_ann_info(self, idx):
"""Get annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.data_infos[idx]['ann']
def get_cat_ids(self, idx):
"""Get category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
results['seg_fields'] = []
def _filter_imgs(self, min_size=32):
"""Filter images too small."""
if self.filter_empty_gt:
warnings.warn(
'CustomDataset does not support filtering empty gt images.')
valid_inds = []
for i, img_info in enumerate(self.data_infos):
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
for i in range(len(self)):
img_info = self.data_infos[i]
if img_info['width'] / img_info['height'] > 1:
self.flag[i] = 1
def _rand_another(self, idx):
"""Get another random index from the same group as the given index."""
pool = np.where(self.flag == self.flag[idx])[0]
return np.random.choice(pool)
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set \
True).
"""
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys \
introduced by pipeline.
"""
img_info = self.data_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys introduced by \
pipeline.
"""
img_info = self.data_infos[idx]
results = dict(img_info=img_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
@classmethod
def get_classes(cls, classes=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
Returns:
tuple[str] or list[str]: Names of categories of the dataset.
"""
if classes is None:
return cls.CLASSES
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
return class_names
def format_results(self, results, **kwargs):
"""Place holder to format result to dataset specific output."""
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
Default: None.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=scale_ranges,
iou_thr=iou_thr,
dataset=self.CLASSES,
logger=logger)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
for i, num in enumerate(proposal_nums):
for j, iou in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
def __repr__(self):
"""Print the number of instance number."""
dataset_type = 'Test' if self.test_mode else 'Train'
result = (f'\n{self.__class__.__name__} {dataset_type} dataset '
f'with number of images {len(self)}, '
f'and instance counts: \n')
if self.CLASSES is None:
result += 'Category names are not provided. \n'
return result
instance_count = np.zeros(len(self.CLASSES) + 1).astype(int)
# count the instance number in each image
for idx in range(len(self)):
label = self.get_ann_info(idx)['labels']
unique, counts = np.unique(label, return_counts=True)
if len(unique) > 0:
# add the occurrence number to each class
instance_count[unique] += counts
else:
# background is the last index
instance_count[-1] += 1
# create a table with category count
table_data = [['category', 'count'] * 5]
row_data = []
for cls, count in enumerate(instance_count):
if cls < len(self.CLASSES):
row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}']
else:
# add the background number
row_data += ['-1 background', f'{count}']
if len(row_data) == 10:
table_data.append(row_data)
row_data = []
if len(row_data) >= 2:
if row_data[-1] == '0':
row_data = row_data[:-2]
if len(row_data) >= 2:
table_data.append([])
table_data.append(row_data)
table = AsciiTable(table_data)
result += table.table
return result
| 1 | 26,799 | How about we raise a warning here and simply change to use self.data_infos = self.load_annotations(self.ann_file) | open-mmlab-mmdetection | py |
@@ -1,9 +1,9 @@
# -*- coding: UTF-8 -*-
-#synthDrivers/_espeak.py
-#A part of NonVisual Desktop Access (NVDA)
-#Copyright (C) 2007-2017 NV Access Limited, Peter Vágner
-#This file is covered by the GNU General Public License.
-#See the file COPYING for more details.
+# synthDrivers/_espeak.py
+# A part of NonVisual Desktop Access (NVDA)
+# Copyright (C) 2007-2020 NV Access Limited, Peter Vágner
+# This file is covered by the GNU General Public License.
+# See the file COPYING for more details.
import time
import nvwave | 1 | # -*- coding: UTF-8 -*-
#synthDrivers/_espeak.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2007-2017 NV Access Limited, Peter Vágner
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import time
import nvwave
import threading
import queue
from ctypes import cdll
from ctypes import *
import config
import globalVars
from logHandler import log
import os
import codecs
isSpeaking = False
onIndexReached = None
bgThread=None
bgQueue = None
player = None
espeakDLL=None
#: Keeps count of the number of bytes pushed for the current utterance.
#: This is necessary because index positions are given as ms since the start of the utterance.
_numBytesPushed = 0
#Parameter bounds
minRate=80
maxRate=450
minPitch=0
maxPitch=99
#event types
espeakEVENT_LIST_TERMINATED=0
espeakEVENT_WORD=1
espeakEVENT_SENTENCE=2
espeakEVENT_MARK=3
espeakEVENT_PLAY=4
espeakEVENT_END=5
espeakEVENT_MSG_TERMINATED=6
espeakEVENT_PHONEME=7
#position types
POS_CHARACTER=1
POS_WORD=2
POS_SENTENCE=3
#output types
AUDIO_OUTPUT_PLAYBACK=0
AUDIO_OUTPUT_RETRIEVAL=1
AUDIO_OUTPUT_SYNCHRONOUS=2
AUDIO_OUTPUT_SYNCH_PLAYBACK=3
#synth flags
espeakCHARS_AUTO=0
espeakCHARS_UTF8=1
espeakCHARS_8BIT=2
espeakCHARS_WCHAR=3
espeakSSML=0x10
espeakPHONEMES=0x100
espeakENDPAUSE=0x1000
espeakKEEP_NAMEDATA=0x2000
#speech parameters
espeakSILENCE=0
espeakRATE=1
espeakVOLUME=2
espeakPITCH=3
espeakRANGE=4
espeakPUNCTUATION=5
espeakCAPITALS=6
espeakWORDGAP=7
espeakOPTIONS=8 # reserved for misc. options. not yet used
espeakINTONATION=9
espeakRESERVED1=10
espeakRESERVED2=11
#error codes
EE_OK=0
#EE_INTERNAL_ERROR=-1
#EE_BUFFER_FULL=1
#EE_NOT_FOUND=2
# eSpeak initialization flags
espeakINITIALIZE_DONT_EXIT = 0x8000
class espeak_EVENT_id(Union):
_fields_=[
('number',c_int),
('name',c_char_p),
('string',c_char*8),
]
class espeak_EVENT(Structure):
_fields_=[
('type',c_int),
('unique_identifier',c_uint),
('text_position',c_int),
('length',c_int),
('audio_position',c_int),
('sample',c_int),
('user_data',c_void_p),
('id',espeak_EVENT_id),
]
class espeak_VOICE(Structure):
_fields_=[
('name',c_char_p),
('languages',c_char_p),
('identifier',c_char_p),
('gender',c_byte),
('age',c_byte),
('variant',c_byte),
('xx1',c_byte),
('score',c_int),
('spare',c_void_p),
]
def __eq__(self, other):
return isinstance(other, type(self)) and addressof(self) == addressof(other)
# As __eq__ was defined on this class, we must provide __hash__ to remain hashable.
# The default hash implementation is fine for our purposes.
def __hash__(self):
return super().__hash__()
# constants that can be returned by espeak_callback
CALLBACK_CONTINUE_SYNTHESIS=0
CALLBACK_ABORT_SYNTHESIS=1
def encodeEspeakString(text):
return text.encode('utf8')
def decodeEspeakString(data):
return data.decode('utf8')
t_espeak_callback=CFUNCTYPE(c_int,POINTER(c_short),c_int,POINTER(espeak_EVENT))
@t_espeak_callback
def callback(wav,numsamples,event):
try:
global player, isSpeaking, _numBytesPushed
if not isSpeaking:
return CALLBACK_ABORT_SYNTHESIS
indexes = []
for e in event:
if e.type==espeakEVENT_MARK:
indexNum = int(decodeEspeakString(e.id.name))
# e.audio_position is ms since the start of this utterance.
# Convert to bytes since the start of the utterance.
BYTES_PER_SAMPLE = 2
MS_PER_SEC = 1000
bytesPerMS = player.samplesPerSec * BYTES_PER_SAMPLE // MS_PER_SEC
indexByte = e.audio_position * bytesPerMS
# Subtract bytes in the utterance that have already been handled
# to give us the byte offset into the samples for this callback.
indexByte -= _numBytesPushed
indexes.append((indexNum, indexByte))
elif e.type==espeakEVENT_LIST_TERMINATED:
break
if not wav:
player.idle()
onIndexReached(None)
isSpeaking = False
return CALLBACK_CONTINUE_SYNTHESIS
wav = string_at(wav, numsamples * sizeof(c_short)) if numsamples>0 else b""
prevByte = 0
for indexNum, indexByte in indexes:
player.feed(wav[prevByte:indexByte],
onDone=lambda indexNum=indexNum: onIndexReached(indexNum))
prevByte = indexByte
if not isSpeaking:
return CALLBACK_ABORT_SYNTHESIS
player.feed(wav[prevByte:])
_numBytesPushed += len(wav)
return CALLBACK_CONTINUE_SYNTHESIS
except:
log.error("callback", exc_info=True)
class BgThread(threading.Thread):
def __init__(self):
super().__init__(name=f"{self.__class__.__module__}.{self.__class__.__qualname__}")
self.setDaemon(True)
def run(self):
global isSpeaking
while True:
func, args, kwargs = bgQueue.get()
if not func:
break
try:
func(*args, **kwargs)
except:
log.error("Error running function from queue", exc_info=True)
bgQueue.task_done()
def _execWhenDone(func, *args, mustBeAsync=False, **kwargs):
global bgQueue
if mustBeAsync or bgQueue.unfinished_tasks != 0:
# Either this operation must be asynchronous or There is still an operation in progress.
# Therefore, run this asynchronously in the background thread.
bgQueue.put((func, args, kwargs))
else:
func(*args, **kwargs)
def _speak(text):
global isSpeaking, _numBytesPushed
uniqueID=c_int()
# if eSpeak was interupted while speaking ssml that changed parameters such as pitch,
# It may not reset those runtime values back to the user-configured values.
# Therefore forcefully cause eSpeak to reset its parameters each time beginning to speak again after not speaking.
if not isSpeaking:
espeakDLL.espeak_ng_Cancel()
isSpeaking = True
_numBytesPushed = 0
# eSpeak can only process compound emojis when using a UTF8 encoding
text=text.encode('utf8',errors='ignore')
flags = espeakCHARS_UTF8 | espeakSSML | espeakPHONEMES
return espeakDLL.espeak_Synth(text,0,0,0,0,flags,byref(uniqueID),0)
def speak(text):
global bgQueue
_execWhenDone(_speak, text, mustBeAsync=True)
def stop():
global isSpeaking, bgQueue
# Kill all speech from now.
# We still want parameter changes to occur, so requeue them.
params = []
try:
while True:
item = bgQueue.get_nowait()
if item[0] != _speak:
params.append(item)
bgQueue.task_done()
except queue.Empty:
# Let the exception break us out of this loop, as queue.empty() is not reliable anyway.
pass
for item in params:
bgQueue.put(item)
isSpeaking = False
player.stop()
def pause(switch):
global player
player.pause(switch)
def setParameter(param,value,relative):
_execWhenDone(espeakDLL.espeak_SetParameter,param,value,relative)
def getParameter(param,current):
return espeakDLL.espeak_GetParameter(param,current)
def getVoiceList():
voices=espeakDLL.espeak_ListVoices(None)
voiceList=[]
for voice in voices:
if not voice: break
voiceList.append(voice.contents)
return voiceList
def getCurrentVoice():
voice = espeakDLL.espeak_GetCurrentVoice()
if voice:
return voice.contents
else:
return None
def setVoice(voice):
# For some weird reason, espeak_EspeakSetVoiceByProperties throws an integer divide by zero exception.
setVoiceByName(voice.identifier)
def setVoiceByName(name):
_execWhenDone(espeakDLL.espeak_SetVoiceByName,encodeEspeakString(name))
def _setVoiceAndVariant(voice=None, variant=None):
v=getCurrentVoice()
res = decodeEspeakString(v.identifier).split("+")
if not voice:
voice = res[0]
if not variant:
if len(res) == 2:
variant = res[1]
else:
variant = "none"
if variant == "none":
espeakDLL.espeak_SetVoiceByName(encodeEspeakString(voice))
else:
try:
espeakDLL.espeak_SetVoiceByName(encodeEspeakString("%s+%s" % (voice, variant)))
except:
espeakDLL.espeak_SetVoiceByName(encodeEspeakString(voice))
def setVoiceAndVariant(voice=None, variant=None):
_execWhenDone(_setVoiceAndVariant, voice=voice, variant=variant)
def _setVoiceByLanguage(lang):
v=espeak_VOICE()
lang=lang.replace('_','-')
v.languages=encodeEspeakString(lang)
try:
espeakDLL.espeak_SetVoiceByProperties(byref(v))
except:
v.languages=encodeEspeakString("en")
espeakDLL.espeak_SetVoiceByProperties(byref(v))
def setVoiceByLanguage(lang):
_execWhenDone(_setVoiceByLanguage, lang)
def espeak_errcheck(res, func, args):
if res != EE_OK:
raise RuntimeError("%s: code %d" % (func.__name__, res))
return res
def initialize(indexCallback=None):
"""
@param indexCallback: A function which is called when eSpeak reaches an index.
It is called with one argument:
the number of the index or C{None} when speech stops.
"""
global espeakDLL, bgThread, bgQueue, player, onIndexReached
espeakDLL = cdll.LoadLibrary(os.path.join(globalVars.appDir, "synthDrivers", "espeak.dll"))
espeakDLL.espeak_Info.restype=c_char_p
espeakDLL.espeak_Synth.errcheck=espeak_errcheck
espeakDLL.espeak_SetVoiceByName.errcheck=espeak_errcheck
espeakDLL.espeak_SetVoiceByProperties.errcheck=espeak_errcheck
espeakDLL.espeak_SetParameter.errcheck=espeak_errcheck
espeakDLL.espeak_Terminate.errcheck=espeak_errcheck
espeakDLL.espeak_ListVoices.restype=POINTER(POINTER(espeak_VOICE))
espeakDLL.espeak_GetCurrentVoice.restype=POINTER(espeak_VOICE)
espeakDLL.espeak_SetVoiceByName.argtypes=(c_char_p,)
eSpeakPath = os.path.join(globalVars.appDir, "synthDrivers")
sampleRate = espeakDLL.espeak_Initialize(
AUDIO_OUTPUT_SYNCHRONOUS, 300,
os.fsencode(eSpeakPath),
# #10607: ensure espeak does not exit NVDA's process on errors such as the espeak path being invalid.
espeakINITIALIZE_DONT_EXIT
)
if sampleRate <= 0:
raise OSError(f"espeak_Initialize failed with code {sampleRate}. Given Espeak data path of {eSpeakPath}")
player = nvwave.WavePlayer(
channels=1,
samplesPerSec=sampleRate,
bitsPerSample=16,
outputDevice=config.conf["speech"]["outputDevice"],
buffered=True
)
onIndexReached = indexCallback
espeakDLL.espeak_SetSynthCallback(callback)
bgQueue = queue.Queue()
bgThread=BgThread()
bgThread.start()
def terminate():
global bgThread, bgQueue, player, espeakDLL , onIndexReached
stop()
bgQueue.put((None, None, None))
bgThread.join()
espeakDLL.espeak_Terminate()
bgThread=None
bgQueue=None
player.close()
player=None
espeakDLL=None
onIndexReached = None
def info():
return espeakDLL.espeak_Info()
def getVariantDict():
dir = os.path.join(globalVars.appDir, "synthDrivers", "espeak-ng-data", "voices", "!v")
# Translators: name of the default espeak varient.
variantDict={"none": pgettext("espeakVarient", "none")}
for fileName in os.listdir(dir):
absFilePath = os.path.join(dir, fileName)
if os.path.isfile(absFilePath):
# In python 3, open assumes the default system encoding by default.
# This fails if Windows' "use Unicode UTF-8 for worldwide language support" option is enabled.
# The expected encoding is unknown, therefore use latin-1 to stay as close to Python 2 behavior as possible.
try:
with open(absFilePath, 'r', encoding="latin-1") as file:
for line in file:
if line.startswith('name '):
temp=line.split(" ")
if len(temp) ==2:
name=temp[1].rstrip()
break
name=None
except:
log.error("Couldn't parse espeak variant file %s" % fileName, exc_info=True)
continue
if name is not None:
variantDict[fileName]=name
return variantDict
| 1 | 31,544 | Please remove this line to comply with the standard for copyright headers. | nvaccess-nvda | py |
@@ -21,6 +21,9 @@ namespace Nethermind.Consensus.AuRa.Validators
{
internal static class AuRaValidatorsCollectionExtensions
{
- public static int MinSealersForFinalization(this IList<Address> validators) => validators.Count / 2 + 1;
+ public static int MinSealersForFinalization(this IList<Address> validators, bool twoThirds = false)
+ {
+ return (twoThirds ? validators.Count * 2 / 3 : validators.Count / 2) + 1;
+ }
}
} | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System.Collections.Generic;
using Nethermind.Core;
namespace Nethermind.Consensus.AuRa.Validators
{
internal static class AuRaValidatorsCollectionExtensions
{
public static int MinSealersForFinalization(this IList<Address> validators) => validators.Count / 2 + 1;
}
} | 1 | 23,864 | seems incorrect - for 5 validators it will tell that 3 is enough to seal and you need 4 2/3 * 5 is 3.3 | NethermindEth-nethermind | .cs |
@@ -19,11 +19,16 @@
package org.apache.iceberg.hive;
-import java.io.Closeable;
-import java.util.Arrays;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import java.io.Closeable;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.iceberg.BaseMetastoreCatalog;
import org.apache.iceberg.BaseMetastoreTableOperations;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.SupportsNamespaces;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
import org.apache.iceberg.exceptions.NoSuchNamespaceException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.hadoop.HadoopFileIO;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.base.Joiner;
import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An Iceberg {@link org.apache.iceberg.catalog.Catalog} backed by the Hive Metastore.
 *
 * <p>Namespaces are mapped one-to-one to Hive databases and are therefore restricted to a
 * single level. Iceberg tables are distinguished from plain Hive tables by the
 * {@code table_type} table parameter set by {@link BaseMetastoreTableOperations}.
 */
public class HiveCatalog extends BaseMetastoreCatalog implements Closeable, SupportsNamespaces, Configurable {
  private static final Logger LOG = LoggerFactory.getLogger(HiveCatalog.class);

  private String name;
  private HiveClientPool clients;
  private Configuration conf;
  // Stack trace captured at construction time; used by finalize() to report where an
  // unclosed catalog instance was created.
  private StackTraceElement[] createStack;
  private FileIO fileIO;
  private boolean closed;

  public HiveCatalog() {
  }

  /**
   * Hive Catalog constructor.
   *
   * @deprecated please use the no-arg constructor, setConf and initialize to construct the catalog. Will be removed in
   * v0.13.0
   * @param conf Hadoop Configuration
   */
  @Deprecated
  public HiveCatalog(Configuration conf) {
    this.name = "hive";
    int clientPoolSize = conf.getInt(CatalogProperties.CLIENT_POOL_SIZE, CatalogProperties.CLIENT_POOL_SIZE_DEFAULT);
    this.clients = new HiveClientPool(clientPoolSize, conf);
    this.conf = conf;
    this.createStack = Thread.currentThread().getStackTrace();
    this.closed = false;
    this.fileIO = new HadoopFileIO(conf);
  }

  /**
   * Initializes this catalog from catalog properties.
   *
   * <p>NOTE(review): this method reads and mutates {@code this.conf}, so it appears to assume
   * {@link #setConf(Configuration)} was called beforehand — confirm against the caller
   * (e.g. CatalogUtil) before relying on this.
   *
   * @param inputName catalog name
   * @param properties catalog properties; see {@link CatalogProperties} for recognized keys
   */
  @Override
  public void initialize(String inputName, Map<String, String> properties) {
    this.name = inputName;
    if (properties.containsKey(CatalogProperties.URI)) {
      this.conf.set(HiveConf.ConfVars.METASTOREURIS.varname, properties.get(CatalogProperties.URI));
    }

    if (properties.containsKey(CatalogProperties.WAREHOUSE_LOCATION)) {
      this.conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, properties.get(CatalogProperties.WAREHOUSE_LOCATION));
    }

    int clientPoolSize = PropertyUtil.propertyAsInt(properties,
        CatalogProperties.CLIENT_POOL_SIZE, CatalogProperties.CLIENT_POOL_SIZE_DEFAULT);
    this.clients = new HiveClientPool(clientPoolSize, this.conf);
    this.createStack = Thread.currentThread().getStackTrace();
    this.closed = false;

    String fileIOImpl = properties.get(CatalogProperties.FILE_IO_IMPL);
    this.fileIO = fileIOImpl == null ? new HadoopFileIO(conf) : CatalogUtil.loadFileIO(fileIOImpl, properties, conf);
  }

  /**
   * Lists the Iceberg tables in the given namespace.
   *
   * <p>Hive tables that are not tagged with the Iceberg table_type parameter are filtered
   * out of the result.
   *
   * @param namespace a single-level namespace naming a Hive database
   * @return identifiers of the Iceberg tables in the database
   * @throws NoSuchNamespaceException if the database does not exist
   */
  @Override
  public List<TableIdentifier> listTables(Namespace namespace) {
    Preconditions.checkArgument(isValidateNamespace(namespace),
        "Missing database in namespace: %s", namespace);
    String database = namespace.level(0);

    try {
      List<String> tableNames = clients.run(client -> client.getAllTables(database));
      List<Table> tableObjects = clients.run(client -> client.getTableObjectsByName(database, tableNames));
      List<TableIdentifier> tableIdentifiers = tableObjects.stream()
          // tables without parameters cannot be Iceberg tables; otherwise check the table_type tag
          .filter(table -> table.getParameters() != null && BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE
              .equalsIgnoreCase(table.getParameters().get(BaseMetastoreTableOperations.TABLE_TYPE_PROP)))
          .map(table -> TableIdentifier.of(namespace, table.getTableName()))
          .collect(Collectors.toList());

      LOG.debug("Listing of namespace: {} resulted in the following tables: {}", namespace, tableIdentifiers);
      return tableIdentifiers;

    } catch (UnknownDBException e) {
      throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);

    } catch (TException e) {
      throw new RuntimeException("Failed to list all tables under namespace " + namespace, e);

    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException("Interrupted in call to listTables", e);
    }
  }

  @Override
  public String name() {
    return name;
  }

  /**
   * Drops a table from the metastore and, when {@code purge} is set, deletes its data and
   * metadata files.
   *
   * @param identifier the table to drop
   * @param purge whether data and metadata files should also be deleted
   * @return true if the table was dropped, false if it did not exist or the identifier is invalid
   */
  @Override
  public boolean dropTable(TableIdentifier identifier, boolean purge) {
    if (!isValidIdentifier(identifier)) {
      return false;
    }

    String database = identifier.namespace().level(0);

    TableOperations ops = newTableOps(identifier);
    TableMetadata lastMetadata;
    if (purge && ops.current() != null) {
      // capture the last metadata before the HMS entry disappears so files can be cleaned up
      lastMetadata = ops.current();
    } else {
      lastMetadata = null;
    }

    try {
      clients.run(client -> {
        client.dropTable(database, identifier.name(),
            false /* do not delete data */,
            false /* throw NoSuchObjectException if the table doesn't exist */);
        return null;
      });

      if (purge && lastMetadata != null) {
        CatalogUtil.dropTableData(ops.io(), lastMetadata);
      }

      LOG.info("Dropped table: {}", identifier);
      return true;

    } catch (NoSuchTableException | NoSuchObjectException e) {
      LOG.info("Skipping drop, table does not exist: {}", identifier, e);
      return false;

    } catch (TException e) {
      throw new RuntimeException("Failed to drop " + identifier, e);

    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException("Interrupted in call to dropTable", e);
    }
  }

  /**
   * Renames a table, possibly across databases, by altering its HMS entry.
   *
   * @param from the existing table identifier
   * @param originalTo the target identifier; a leading catalog-name component is stripped
   * @throws NoSuchTableException if {@code from} is invalid or does not exist
   * @throws org.apache.iceberg.exceptions.AlreadyExistsException if {@code to} already exists
   */
  @Override
  public void renameTable(TableIdentifier from, TableIdentifier originalTo) {
    if (!isValidIdentifier(from)) {
      throw new NoSuchTableException("Invalid identifier: %s", from);
    }

    TableIdentifier to = removeCatalogName(originalTo);
    Preconditions.checkArgument(isValidIdentifier(to), "Invalid identifier: %s", to);

    String toDatabase = to.namespace().level(0);
    String fromDatabase = from.namespace().level(0);
    String fromName = from.name();

    try {
      Table table = clients.run(client -> client.getTable(fromDatabase, fromName));
      HiveTableOperations.validateTableIsIceberg(table, fullTableName(name, from));

      table.setDbName(toDatabase);
      table.setTableName(to.name());

      clients.run(client -> {
        client.alter_table(fromDatabase, fromName, table);
        return null;
      });

      LOG.info("Renamed table from {}, to {}", from, to);

    } catch (NoSuchObjectException e) {
      throw new NoSuchTableException("Table does not exist: %s", from);

    } catch (AlreadyExistsException e) {
      throw new org.apache.iceberg.exceptions.AlreadyExistsException("Table already exists: %s", to);

    } catch (TException e) {
      throw new RuntimeException("Failed to rename " + from + " to " + to, e);

    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException("Interrupted in call to rename", e);
    }
  }

  /**
   * Creates a Hive database for the given single-level namespace.
   *
   * @param namespace a single-level namespace
   * @param meta database properties; "comment" and "location" are mapped to the database
   *             description and location URI respectively
   */
  @Override
  public void createNamespace(Namespace namespace, Map<String, String> meta) {
    Preconditions.checkArgument(
        !namespace.isEmpty(),
        "Cannot create namespace with invalid name: %s", namespace);
    Preconditions.checkArgument(isValidateNamespace(namespace),
        "Cannot support multi part namespace in Hive MetaStore: %s", namespace);

    try {
      clients.run(client -> {
        client.createDatabase(convertToDatabase(namespace, meta));
        return null;
      });

      LOG.info("Created namespace: {}", namespace);

    } catch (AlreadyExistsException e) {
      throw new org.apache.iceberg.exceptions.AlreadyExistsException(e, "Namespace '%s' already exists!",
          namespace);

    } catch (TException e) {
      throw new RuntimeException("Failed to create namespace " + namespace + " in Hive MetaStore", e);

    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException(
          "Interrupted in call to createDatabase(name) " + namespace + " in Hive MetaStore", e);
    }
  }

  /**
   * Lists namespaces. Because Hive databases are single-level, only the empty namespace has
   * children; a valid single-level namespace yields an empty list.
   *
   * @throws NoSuchNamespaceException if the namespace has more than one level
   */
  @Override
  public List<Namespace> listNamespaces(Namespace namespace) {
    if (!isValidateNamespace(namespace) && !namespace.isEmpty()) {
      throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
    }
    if (!namespace.isEmpty()) {
      return ImmutableList.of();
    }
    try {
      List<Namespace> namespaces = clients.run(HiveMetaStoreClient::getAllDatabases)
          .stream()
          .map(Namespace::of)
          .collect(Collectors.toList());

      LOG.debug("Listing namespace {} returned tables: {}", namespace, namespaces);
      return namespaces;

    } catch (TException e) {
      throw new RuntimeException("Failed to list all namespace: " + namespace + " in Hive MetaStore", e);

    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException(
          "Interrupted in call to getAllDatabases() " + namespace + " in Hive MetaStore", e);
    }
  }

  /**
   * Drops the Hive database for the given namespace without cascading.
   *
   * @return true if the database was dropped, false if it did not exist or the namespace is invalid
   * @throws NamespaceNotEmptyException if the database still contains tables
   */
  @Override
  public boolean dropNamespace(Namespace namespace) {
    if (!isValidateNamespace(namespace)) {
      return false;
    }

    try {
      clients.run(client -> {
        client.dropDatabase(namespace.level(0),
            false /* deleteData */,
            false /* ignoreUnknownDb */,
            false /* cascade */);
        return null;
      });

      LOG.info("Dropped namespace: {}", namespace);
      return true;

    } catch (InvalidOperationException e) {
      throw new NamespaceNotEmptyException(e, "Namespace %s is not empty. One or more tables exist.", namespace);

    } catch (NoSuchObjectException e) {
      return false;

    } catch (TException e) {
      throw new RuntimeException("Failed to drop namespace " + namespace + " in Hive MetaStore", e);

    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException(
          "Interrupted in call to drop dropDatabase(name) " + namespace + " in Hive MetaStore", e);
    }
  }

  /**
   * Sets (merges) properties on the namespace's Hive database.
   */
  @Override
  public boolean setProperties(Namespace namespace,  Map<String, String> properties) {
    Map<String, String> parameter = Maps.newHashMap();
    parameter.putAll(loadNamespaceMetadata(namespace));
    parameter.putAll(properties);
    Database database = convertToDatabase(namespace, parameter);

    alterHiveDataBase(namespace, database);
    LOG.debug("Successfully set properties {} for {}", properties.keySet(), namespace);

    // Always successful, otherwise exception is thrown
    return true;
  }

  /**
   * Removes properties from the namespace's Hive database.
   */
  @Override
  public boolean removeProperties(Namespace namespace,  Set<String> properties) {
    Map<String, String> parameter = Maps.newHashMap();
    parameter.putAll(loadNamespaceMetadata(namespace));
    // a null value causes convertToDatabase to drop the key from the database parameters
    properties.forEach(key -> parameter.put(key, null));
    Database database = convertToDatabase(namespace, parameter);

    alterHiveDataBase(namespace, database);
    LOG.debug("Successfully removed properties {} from {}", properties, namespace);

    // Always successful, otherwise exception is thrown
    return true;
  }

  // Applies the given Database object to the HMS entry for the namespace.
  private void alterHiveDataBase(Namespace namespace,  Database database) {
    try {
      clients.run(client -> {
        client.alterDatabase(namespace.level(0), database);
        return null;
      });

    } catch (NoSuchObjectException | UnknownDBException e) {
      throw new NoSuchNamespaceException(e, "Namespace does not exist: %s", namespace);

    } catch (TException e) {
      throw new RuntimeException(
          "Failed to alter namespace: " + namespace + " in Hive MetaStore", e);

    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException("Interrupted in call to alterDatabase(name) " + namespace + " in Hive MetaStore", e);
    }
  }

  /**
   * Loads the metadata of a namespace from its Hive database: database parameters plus
   * "location" and, when present, "comment".
   *
   * @throws NoSuchNamespaceException if the database does not exist
   */
  @Override
  public Map<String, String> loadNamespaceMetadata(Namespace namespace) {
    if (!isValidateNamespace(namespace)) {
      throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
    }

    try {
      Database database = clients.run(client -> client.getDatabase(namespace.level(0)));
      Map<String, String> metadata = convertToMetadata(database);
      LOG.debug("Loaded metadata for namespace {} found {}", namespace, metadata.keySet());
      return metadata;

    } catch (NoSuchObjectException | UnknownDBException e) {
      throw new NoSuchNamespaceException(e, "Namespace does not exist: %s", namespace);

    } catch (TException e) {
      throw new RuntimeException("Failed to load metadata for namespace: " + namespace + " in Hive MetaStore", e);

    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException(
          "Interrupted in call to getDatabase(name) " + namespace + " in Hive MetaStore", e);
    }
  }

  @Override
  protected boolean isValidIdentifier(TableIdentifier tableIdentifier) {
    return tableIdentifier.namespace().levels().length == 1;
  }

  // Strips a leading component that matches this catalog's name from a two-level identifier.
  private TableIdentifier removeCatalogName(TableIdentifier to) {
    if (isValidIdentifier(to)) {
      return to;
    }

    // check if the identifier includes the catalog name and remove it
    if (to.namespace().levels().length == 2 && name().equalsIgnoreCase(to.namespace().level(0))) {
      return TableIdentifier.of(Namespace.of(to.namespace().level(1)), to.name());
    }

    // return the original unmodified
    return to;
  }

  // Hive databases are single-level, so only one-level namespaces are valid.
  private boolean isValidateNamespace(Namespace namespace) {
    return namespace.levels().length == 1;
  }

  @Override
  public TableOperations newTableOps(TableIdentifier tableIdentifier) {
    String dbName = tableIdentifier.namespace().level(0);
    String tableName = tableIdentifier.name();
    return new HiveTableOperations(conf, clients, fileIO, name, dbName, tableName);
  }

  /**
   * Computes the default warehouse location of a table: the database location if one is set,
   * otherwise {WAREHOUSE_DIR}/{DB_NAME}.db/{TABLE_NAME}.
   */
  @Override
  protected String defaultWarehouseLocation(TableIdentifier tableIdentifier) {
    // This is a little edgy since we basically duplicate the HMS location generation logic.
    // Sadly I do not see a good way around this if we want to keep the order of events, like:
    // - Create meta files
    // - Create the metadata in HMS, and this way committing the changes

    // Create a new location based on the namespace / database if it is set on database level
    try {
      Database databaseData = clients.run(client -> client.getDatabase(tableIdentifier.namespace().levels()[0]));
      if (databaseData.getLocationUri() != null) {
        // If the database location is set use it as a base.
        return String.format("%s/%s", databaseData.getLocationUri(), tableIdentifier.name());
      }

    } catch (TException e) {
      throw new RuntimeException(String.format("Metastore operation failed for %s", tableIdentifier), e);

    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException("Interrupted during commit", e);
    }

    // Otherwise stick to the {WAREHOUSE_DIR}/{DB_NAME}.db/{TABLE_NAME} path
    String warehouseLocation = getWarehouseLocation();
    return String.format(
        "%s/%s.db/%s",
        warehouseLocation,
        tableIdentifier.namespace().levels()[0],
        tableIdentifier.name());
  }

  private String getWarehouseLocation() {
    String warehouseLocation = conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname);
    Preconditions.checkNotNull(warehouseLocation, "Warehouse location is not set: hive.metastore.warehouse.dir=null");
    return warehouseLocation;
  }

  // Converts a Hive Database into a namespace metadata map (parameters + location + comment).
  private Map<String, String> convertToMetadata(Database database) {
    Map<String, String> meta = Maps.newHashMap();

    meta.putAll(database.getParameters());
    meta.put("location", database.getLocationUri());
    if (database.getDescription() != null) {
      meta.put("comment", database.getDescription());
    }

    return meta;
  }

  /**
   * Converts a namespace and its metadata map into a Hive Database object.
   *
   * <p>"comment" maps to the database description, "location" overrides the default
   * location URI, and keys with null values are dropped (supports property removal).
   */
  Database convertToDatabase(Namespace namespace,  Map<String, String> meta) {
    if (!isValidateNamespace(namespace)) {
      throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
    }

    Database database  = new Database();
    Map<String, String> parameter = Maps.newHashMap();

    database.setName(namespace.level(0));
    database.setLocationUri(new Path(getWarehouseLocation(), namespace.level(0)).toString() + ".db");

    meta.forEach((key, value) -> {
      if (key.equals("comment")) {
        database.setDescription(value);
      } else if (key.equals("location")) {
        database.setLocationUri(value);
      } else {
        if (value != null) {
          parameter.put(key, value);
        }
      }
    });
    database.setParameters(parameter);

    return database;
  }

  @Override
  public void close() {
    if (!closed) {
      clients.close();
      closed = true;
    }
  }

  @SuppressWarnings("checkstyle:NoFinalizer")
  @Override
  protected void finalize() throws Throwable {
    super.finalize();
    // todo it is possible that the Catalog is gc-ed before child table operations are done w/ the clients object.
    // The closing of the HiveCatalog should take that into account and child TableOperations should own the clients obj
    // or the TabaleOperations should be explicitly closed and the Catalog can't be gc-ed/closed till all children are.
    if (!closed) {
      close(); // releasing resources is more important than printing the warning
      String trace = Joiner.on("\n\t").join(
          Arrays.copyOfRange(createStack, 1, createStack.length));
      // fixed copy-pasted message: this warns about an unclosed catalog, not an input stream
      LOG.warn("Unclosed HiveCatalog instance created by:\n\t{}", trace);
    }
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("name", name)
        .add("uri", this.conf.get(HiveConf.ConfVars.METASTOREURIS.varname))
        .toString();
  }

  @Override
  public void setConf(Configuration conf) {
    // copy the configuration so later changes by the caller do not affect this catalog
    this.conf = new Configuration(conf);
  }

  @Override
  public Configuration getConf() {
    return conf;
  }
}
| 1 | 34,823 | Nit: please remove unnecessary whitespace changes. | apache-iceberg | java |
@@ -73,9 +73,16 @@ public class NodeJSSampleMethodToViewTransformer implements SampleMethodToViewTr
List<SampleFieldView> fields = new ArrayList<>();
for (FieldInfo field : methodInfo.fields().values()) {
+ String name = field.name();
+ // Since the requestBody is named `resource`, all fields named `resource`
+ // are renamed by the Node.js client library generator to `resource_`.
+ if (name.equals("resource")) {
+ name = "resource_";
+ }
+
fields.add(
SampleFieldView.newBuilder()
- .name(field.name())
+ .name(name)
.defaultValue(typeTable.getZeroValueAndSaveNicknameFor(field.type()))
.example(field.example())
.description(field.description()) | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.discovery.transformer.nodejs;
import com.google.api.codegen.discovery.config.FieldInfo;
import com.google.api.codegen.discovery.config.MethodInfo;
import com.google.api.codegen.discovery.config.SampleConfig;
import com.google.api.codegen.discovery.transformer.SampleMethodToViewTransformer;
import com.google.api.codegen.discovery.transformer.SampleNamer;
import com.google.api.codegen.discovery.transformer.SampleTransformerContext;
import com.google.api.codegen.discovery.transformer.SampleTypeTable;
import com.google.api.codegen.discovery.viewmodel.SampleAuthView;
import com.google.api.codegen.discovery.viewmodel.SampleFieldView;
import com.google.api.codegen.discovery.viewmodel.SamplePageStreamingView;
import com.google.api.codegen.discovery.viewmodel.SampleView;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.SymbolTable;
import com.google.api.codegen.util.nodejs.NodeJSNameFormatter;
import com.google.api.codegen.util.nodejs.NodeJSTypeTable;
import com.google.api.codegen.viewmodel.ViewModel;
import com.google.protobuf.Method;
import java.util.ArrayList;
import java.util.List;
/**
 * Transforms a Discovery-document {@link Method} and {@link SampleConfig} into a
 * {@link SampleView} rendered with the Node.js sample snippet template.
 */
public class NodeJSSampleMethodToViewTransformer implements SampleMethodToViewTransformer {

  private static final String TEMPLATE_FILENAME = "nodejs/sample.snip";

  public NodeJSSampleMethodToViewTransformer() {}

  @Override
  public ViewModel transform(Method method, SampleConfig sampleConfig) {
    SampleTypeTable sampleTypeTable =
        new SampleTypeTable(new NodeJSTypeTable(""), new NodeJSSampleTypeNameConverter());
    NodeJSSampleNamer nodeJsSampleNamer = new NodeJSSampleNamer();
    SampleTransformerContext context =
        SampleTransformerContext.create(
            sampleConfig, sampleTypeTable, nodeJsSampleNamer, method.getName());
    return createSampleView(context);
  }

  /**
   * Builds the top-level sample view: auth, fields, request/response variable names, and
   * page-streaming information when applicable.
   */
  private SampleView createSampleView(SampleTransformerContext context) {
    SampleConfig config = context.getSampleConfig();
    MethodInfo methodInfo = config.methods().get(context.getMethodName());
    SampleNamer namer = context.getSampleNamer();
    SampleTypeTable typeTable = context.getSampleTypeTable();
    SymbolTable symbolTable = SymbolTable.fromSeed(NodeJSNameFormatter.RESERVED_IDENTIFIER_SET);

    SampleView.Builder builder = SampleView.newBuilder();

    String serviceVarName = symbolTable.getNewSymbol(namer.getServiceVarName(config.apiTypeName()));
    String serviceTypeName = typeTable.getAndSaveNicknameForServiceType(config.apiTypeName());
    String requestVarName = symbolTable.getNewSymbol(namer.getRequestVarName());

    if (methodInfo.isPageStreaming()) {
      builder.pageStreaming(createSamplePageStreamingView(context, symbolTable));
    }

    // Created before the fields in case there are naming conflicts in the symbol table.
    SampleAuthView sampleAuthView = createSampleAuthView(context, symbolTable);

    List<SampleFieldView> fields = new ArrayList<>();
    for (FieldInfo field : methodInfo.fields().values()) {
      String name = field.name();
      // Since the requestBody is named `resource`, all fields named `resource`
      // are renamed by the Node.js client library generator to `resource_`.
      // Mirror that rename here so the sample refers to the generated field name.
      if (name.equals("resource")) {
        name = "resource_";
      }

      fields.add(
          SampleFieldView.newBuilder()
              .name(name)
              .defaultValue(typeTable.getZeroValueAndSaveNicknameFor(field.type()))
              .example(field.example())
              .description(field.description())
              .build());
    }

    boolean hasResponse = methodInfo.responseType() != null;
    if (hasResponse) {
      builder.responseVarName(symbolTable.getNewSymbol(namer.getResponseVarName()));
    }

    return builder
        .templateFileName(TEMPLATE_FILENAME)
        .outputPath(context.getMethodName() + ".frag.njs")
        .apiTitle(config.apiTitle())
        .apiName(config.apiName())
        .apiVersion(config.apiVersion())
        .auth(sampleAuthView)
        .serviceVarName(serviceVarName)
        .serviceTypeName(serviceTypeName)
        .methodVerb(methodInfo.verb())
        .methodNameComponents(methodInfo.nameComponents())
        .requestVarName(requestVarName)
        .hasRequestBody(methodInfo.requestBodyType() != null)
        .hasResponse(hasResponse)
        .fields(fields)
        .isPageStreaming(methodInfo.isPageStreaming())
        .hasMediaUpload(methodInfo.hasMediaUpload())
        .hasMediaDownload(methodInfo.hasMediaDownload())
        .googleImportVarName(
            symbolTable.getNewSymbol(namer.localVarName(Name.lowerCamel("google"))))
        .build();
  }

  /**
   * Builds the auth portion of the sample view; the auth variable name depends on whether
   * the API uses an API key or an OAuth client.
   */
  private SampleAuthView createSampleAuthView(
      SampleTransformerContext context, SymbolTable symbolTable) {
    SampleConfig config = context.getSampleConfig();
    MethodInfo methodInfo = config.methods().get(context.getMethodName());
    SampleNamer namer = context.getSampleNamer();

    String authVarName = "";
    switch (config.authType()) {
      case API_KEY:
        authVarName = "apiKey";
        break;
      default:
        authVarName = "authClient";
    }

    return SampleAuthView.newBuilder()
        .type(config.authType())
        .instructionsUrl(config.authInstructionsUrl())
        .scopes(methodInfo.authScopes())
        .isScopesSingular(methodInfo.authScopes().size() == 1)
        .authFuncName(
            symbolTable.getNewSymbol(namer.staticFunctionName(Name.lowerCamel("authorize"))))
        .authVarName(symbolTable.getNewSymbol(namer.localVarName(Name.lowerCamel(authVarName))))
        .build();
  }

  /**
   * Builds the page-streaming portion of the sample view.
   *
   * @throws IllegalArgumentException if the method has no page-streaming resource field
   */
  private SamplePageStreamingView createSamplePageStreamingView(
      SampleTransformerContext context, SymbolTable symbolTable) {
    MethodInfo methodInfo = context.getSampleConfig().methods().get(context.getMethodName());
    FieldInfo fieldInfo = methodInfo.pageStreamingResourceField();
    SampleNamer namer = context.getSampleNamer();

    if (fieldInfo == null) {
      throw new IllegalArgumentException("pageStreamingResourceField cannot be null");
    }

    SamplePageStreamingView.Builder builder = SamplePageStreamingView.newBuilder();
    if (fieldInfo.type().isMap()) {
      // map-valued resources need a variable to iterate over the keys
      builder.resourceKeyVarName(
          symbolTable.getNewSymbol(namer.localVarName(Name.lowerCamel("name"))));
    }
    builder.resourceFieldName(namer.getFieldVarName(fieldInfo.name()));
    builder.isResourceMap(fieldInfo.type().isMap());
    builder.handlePageVarName(
        symbolTable.getNewSymbol(namer.localVarName(Name.lowerCamel("handlePage"))));
    builder.pageVarName(
        symbolTable.getNewSymbol(namer.localVarName(Name.lowerCamel(fieldInfo.name(), "page"))));
    return builder.build();
  }
}
| 1 | 20,212 | Is this captured in a test? | googleapis-gapic-generator | java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.