// If the ROS node is closed through SIGINT (Ctrl-C), this function catches the interrupt
// and calls the RecordPeopleToFile function before shutting the node down (see the registration sketch after this function)
void SIGINT_Handler(int s){
RecordPeopleToFile();
ros::shutdown();
} |
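// --- Registration sketch (illustrative; not part of the original node) ---
// For the handler above to receive Ctrl-C, the node must be initialized with
// ros::init_options::NoSigintHandler so ROS does not install its own handler.
// The node name "people_recorder" is a hypothetical placeholder.
#include <csignal>
#include <ros/ros.h>

int main(int argc, char **argv)
{
    ros::init(argc, argv, "people_recorder", ros::init_options::NoSigintHandler);
    ros::NodeHandle nh;
    signal(SIGINT, SIGINT_Handler);  // route Ctrl-C to the custom handler above
    ros::spin();                     // returns after the handler calls ros::shutdown()
    return 0;
}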
<gh_stars>0
// https://leetcode.com/problems/binary-tree-maximum-path-sum/
/**
* Definition for a binary tree node.
* struct TreeNode {
* int val;
* TreeNode *left;
* TreeNode *right;
* TreeNode() : val(0), left(nullptr), right(nullptr) {}
* TreeNode(int x) : val(x), left(nullptr), right(nullptr) {}
* TreeNode(int x, TreeNode *left, TreeNode *right) : val(x), left(left), right(right) {}
* };
*/
// Kadane's-style algorithm on a tree: at each node, extend the path with a child's sum only if it is positive.
class Solution {
public:
int ans;
int findPathSum(TreeNode* root){
if(!root) return 0;
// Best downward path sums from each subtree (never negative, see return below).
int lSum = findPathSum(root->left);
int rSum = findPathSum(root->right);
// Best path passing through this node: add a side only when it helps.
int tempMax = root->val;
if(lSum>0) tempMax += lSum;
if(rSum>0) tempMax += rSum;
ans = max(ans, tempMax);
// Report the best downward path starting at this node, clamped at 0.
return max(0, max(root->val, max(root->val+lSum, root->val+rSum)));
}
int maxPathSum(TreeNode* root) {
ans=INT_MIN;
findPathSum(root);
return ans;
}
}; |
<reponame>proximax-storage/xpx-java-sdk
/*
* Copyright 2018 ProximaX Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Proximax REST API
* Proximax REST API
*
* OpenAPI spec version: v0.0.1
*
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*/
package io.proximax.xpx.remote;
import io.proximax.xpx.exceptions.ApiException;
import io.proximax.xpx.model.GenericResponseMessage;
import io.proximax.xpx.model.NodeInfo;
import io.proximax.xpx.service.remote.RemoteNodeApi;
import org.junit.Test;
import org.junit.Assert;
import org.junit.Ignore;
/**
* API tests for RemoteNodeApi.
*/
@Ignore
public class RemoteNodeApiTest extends AbstractApiTest {
/** The api. */
private final RemoteNodeApi api = new RemoteNodeApi(apiClient);
/**
* Check if the Storage Node is up and running.
*
* This endpoint is used to check if the P2P Storage Node instance is either alive or down.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void checkNodeUsingGETTest() throws ApiException {
GenericResponseMessage response = api.checkNodeUsingGET();
Assert.assertNotNull(response);
}
/**
* Get Storage Node Information
*
* This endpoint returns the information of the P2P Storage Node.
*
* @throws ApiException if the Api call fails
*/
@Test
public void getNodeInfoUsingGETTest() throws ApiException {
NodeInfo response = api.getNodeInfoUsingGET();
Assert.assertNotNull(response);
}
/**
* Set the Blockchain Node connection
*
* This endpoint sets the blockchain node connection used by the P2P Storage Node.
*
* @throws ApiException if the Api call fails
*/
@Test
@Ignore("This test can only be ran if you're running the node locally. e.i: set the api client base url to localhost")
public void setBlockchainNodeConnectionUsingPOSTTest() throws ApiException {
String network = null;
String domain = null;
String port = null;
String response = api.setBlockchainNodeConnectionUsingPOST(network, domain, port);
Assert.assertNotNull(response);
}
}
|
<filename>pkg/hacontroller/controller_test.go<gh_stars>1-10
package hacontroller_test
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"github.com/piraeusdatastore/piraeus-ha-controller/pkg/hacontroller"
)
const (
fakeVAName = "fake-va"
fakePVName = "fake-pv"
fakePVCName = "fake-pvc"
fakePodWithVolumeName = "fake-pod-with-volume"
fakeNamespace = "fake"
fakeAttacher = "csi.fake.k8s.io"
fakeNode = "node1.fake.k8s.io"
)
func initialKubeClient() kubernetes.Interface {
podWithVolume := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fakePodWithVolumeName,
Namespace: fakeNamespace,
},
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "foo",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: fakePVCName,
},
},
},
},
},
}
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: fakePVCName,
Namespace: fakeNamespace,
},
Spec: corev1.PersistentVolumeClaimSpec{
VolumeName: fakePVName,
},
}
va := &storagev1.VolumeAttachment{
ObjectMeta: metav1.ObjectMeta{
Name: fakeVAName,
},
Spec: storagev1.VolumeAttachmentSpec{
Attacher: fakeAttacher,
NodeName: fakeNode,
Source: storagev1.VolumeAttachmentSource{
PersistentVolumeName: &pvc.Spec.VolumeName,
},
},
}
return fake.NewSimpleClientset(
podWithVolume,
pvc,
va,
)
}
func TestHaController_Run_Basic(t *testing.T) {
kubeClient := initialKubeClient()
lostResource := make(chan string, 1)
lostResource <- fakePVName
haController := hacontroller.NewHAController("test", kubeClient, lostResource, hacontroller.WithReconcileInterval(50*time.Millisecond))
ctx := context.Background()
reconcileCtx, reconcileStop := context.WithTimeout(ctx, 200*time.Millisecond)
defer reconcileStop()
err := haController.Run(reconcileCtx)
assert.EqualError(t, err, "context deadline exceeded")
_, err = kubeClient.CoreV1().Pods(fakeNamespace).Get(ctx, fakePodWithVolumeName, metav1.GetOptions{})
assert.True(t, errors.IsNotFound(err), "pod should be deleted")
_, err = kubeClient.StorageV1().VolumeAttachments().Get(ctx, fakeVAName, metav1.GetOptions{})
assert.True(t, errors.IsNotFound(err), "va should be deleted")
}
func TestHaController_Run_WithAttacherName(t *testing.T) {
kubeClient := initialKubeClient()
lostResource := make(chan string, 1)
lostResource <- fakePVName
haController := hacontroller.NewHAController("test", kubeClient, lostResource,
hacontroller.WithReconcileInterval(50*time.Millisecond),
hacontroller.WithAttacherName(fakeAttacher),
)
ctx := context.Background()
reconcileCtx, reconcileStop := context.WithTimeout(ctx, 200*time.Millisecond)
defer reconcileStop()
err := haController.Run(reconcileCtx)
assert.EqualError(t, err, "context deadline exceeded")
_, err = kubeClient.CoreV1().Pods(fakeNamespace).Get(ctx, fakePodWithVolumeName, metav1.GetOptions{})
assert.True(t, errors.IsNotFound(err), "pod should be deleted")
_, err = kubeClient.StorageV1().VolumeAttachments().Get(ctx, fakeVAName, metav1.GetOptions{})
assert.True(t, errors.IsNotFound(err), "va should be deleted")
}
func TestHaController_Run_WithWrongAttacherName(t *testing.T) {
kubeClient := initialKubeClient()
lostResource := make(chan string, 1)
lostResource <- fakePVName
haController := hacontroller.NewHAController("test", kubeClient, lostResource,
hacontroller.WithReconcileInterval(50*time.Millisecond),
hacontroller.WithAttacherName(fakeAttacher+"something wrong"),
)
ctx := context.Background()
reconcileCtx, reconcileStop := context.WithTimeout(ctx, 200*time.Millisecond)
defer reconcileStop()
err := haController.Run(reconcileCtx)
assert.EqualError(t, err, "context deadline exceeded")
_, err = kubeClient.CoreV1().Pods(fakeNamespace).Get(ctx, fakePodWithVolumeName, metav1.GetOptions{})
assert.NoError(t, err, "pod should not be deleted")
_, err = kubeClient.StorageV1().VolumeAttachments().Get(ctx, fakeVAName, metav1.GetOptions{})
assert.NoError(t, err, "va should not be deleted")
}
type neverLeaderElector struct{}
func (*neverLeaderElector) IsLeader() bool {
return false
}
func TestHaController_Run_NoChangeIfNotLeader(t *testing.T) {
kubeClient := initialKubeClient()
lostResource := make(chan string, 1)
lostResource <- fakePVName
haController := hacontroller.NewHAController("test", kubeClient, lostResource,
hacontroller.WithReconcileInterval(50*time.Millisecond),
hacontroller.WithLeaderElector(&neverLeaderElector{}),
)
ctx := context.Background()
reconcileCtx, reconcileStop := context.WithTimeout(ctx, 200*time.Millisecond)
defer reconcileStop()
err := haController.Run(reconcileCtx)
assert.EqualError(t, err, "context deadline exceeded")
_, err = kubeClient.CoreV1().Pods(fakeNamespace).Get(ctx, fakePodWithVolumeName, metav1.GetOptions{})
assert.NoError(t, err, "pod should not be deleted")
_, err = kubeClient.StorageV1().VolumeAttachments().Get(ctx, fakeVAName, metav1.GetOptions{})
assert.NoError(t, err, "va should not be deleted")
}
|
<filename>src/power_saver.h
#ifndef power_saver_h
#define power_saver_h
#include <mono.h>
namespace mono {
/**
* @brief Auto dim the display and sleep Mono after a given period of inactivity.
*
* This class will automatically dim the display and enter sleep mode
* after a period of user inactivity. It uses 3 states:
*
* 1. Normal mode (full brightness on display)
* 2. After a period of no user touch input (default 10 secs.),
* display dims to 11%
* 3. After yet a period (default 10 secs.) of no touch input, sleep
* mode is entered.
*
* Any touch input will reset the state to *1.*
*
* You should add this object to your *AppController* class to enable
* automatic power saving. (A usage sketch follows at the end of this header.)
*
* #### Multiple PowerSavers
*
* If you want to use multiple instances of this class, remember to @ref
* deactivate the instances that should be inactive. Having two active
* instances at one time can be confusing to the users.
*
* #### Catch-all Touches
*
* To capture all touches, an instance must be the first to respond to
* touch inputs. The touch system has a responder chain of objects that
* handles touch input. Any of these objects might choose to break the chain
* and allow no further processing of a given touch event.
*
* Therefore the PowerSaver must be the first (or one of the first) to handle
* touch events. You accomplish this by declaring the PowerSaver object as the
* first member object of your AppController, before any other touch-related
* classes.
*
*/
class PowerSaver : public TouchResponder
{
protected:
Timer dimTimer, sleepTimer;
int dimBright;
int fullBright;
bool enabled;
void dimStep();
void sleepStep();
void undimStep();
public:
/**
* @brief Construct an auto dimmer and sleep timer.
*
* @param dimTimeoutMs The delay before display is dimmed (in milliseconds)
* @param sleepTimeoutMs The delay before sleep mode is entered (in milliseconds)
* @param dimBrightness The brightness level when display is dimmed (0 to 255)
* @param fullBrightness The full brightness level (normal mode) (0 to 255)
*/
PowerSaver(int dimTimeoutMs = 10000, int sleepTimeoutMs = 10000, int dimBrightness = 30, int fullBrightness = 255);
/**
* @brief Starts the timer that will dim the display after the chosen timeout.
*
* This call stops any running sleep timer. After the dim timer fires,
* the sleep timer is automatically started.
*/
void startDimTimer();
/**
* @brief Starts the sleep timer that will sleep Mono after the chosen timeout.
*
* This will stop a running *dim timer*, and trigger sleep mode on timeout.
*/
void startSleepTimer();
/**
* @brief Immediately dim the display and then start the sleep timer.
*
* This will (asynchronously) dim the display to the chosen brightness level.
* When the display has been dimmed, the sleep timer is started.
*/
void dim();
/**
* @brief Immediately undim the display to full brightness.
*
* This will stop the sleep timer and re-start the dim timer.
*/
void undim();
/**
* @brief Disable the PowerSaver altogether.
*
* No dimming or sleep is triggered. You must call @ref startDimTimer
* to re-enable the PowerSaver.
*/
void deactivate();
virtual void RespondTouchBegin(TouchEvent &event);
};
}
#endif /* power_saver_h */
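// --- Usage sketch (illustrative; not shipped with this header) ---
// Shows the PowerSaver declared as the *first* member of an application
// controller, so it registers as the first touch responder (see the
// "Catch-all Touches" note above). "MyAppController" and its other members
// are hypothetical; a real mono app controller derives from the framework's
// application interface.
class MyAppController
{
public:
    // First member: receives touch events before any other responder.
    // Dims after 10 s of no touches, sleeps 10 s later, dimmed level 30/255.
    mono::PowerSaver saver;

    // ... display views, buttons and other touch-related members go here ...

    MyAppController() : saver(10000, 10000, 30, 255) {}
};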
|
// coerceSlice creates a new slice of the appropriate type for the target field
// and coerces each of the query parameter values into the destination type.
// Should any of the provided query parameters fail to be coerced, an error is
// returned and the entire slice will not be applied.
func (d *decoder) coerceSlice(query []string, target reflect.Kind, field reflect.Value) error {
var err error
sliceType := field.Type().Elem()
coerceKind := sliceType.Kind()
sl := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, 0)
slice := reflect.New(sl.Type())
slice.Elem().Set(sl)
for _, q := range query {
val := reflect.New(sliceType).Elem()
err = d.coerce(q, coerceKind, val)
if err != nil {
return err
}
slice.Elem().Set(reflect.Append(slice.Elem(), val))
}
field.Set(slice.Elem())
return nil
} |
<gh_stars>0
import { _City } from "./_City";
import { _Country } from "./_Country";
import { _GeoLocation } from "./_GeoLocation";
import { _Organization } from "./_Organization";
import { Structure as _Structure_ } from "@aws-sdk/types";
export const _RemoteIpDetails: _Structure_ = {
type: "structure",
required: [],
members: {
City: {
shape: _City,
locationName: "city"
},
Country: {
shape: _Country,
locationName: "country"
},
GeoLocation: {
shape: _GeoLocation,
locationName: "geoLocation"
},
IpAddressV4: {
shape: {
type: "string"
},
locationName: "ipAddressV4"
},
Organization: {
shape: _Organization,
locationName: "organization"
}
}
};
|
#include <bits/stdc++.h>
#define REP(i,n) for (int i=0; i<(n); ++i)
#define REPR(i,n,m) for (int i=(n); i>=(m); --i)
using namespace std;
using LL = long long;
using LD = long double;
using PLL = pair<long long, long long>;
using PLD = pair<long double, long double>;
using VLL = vector<long long>;
using VLD = vector<long double>;
using VPLL = vector<PLL>;
using VPLD = vector<PLD>;
const int INF = numeric_limits<int>::max();
const unsigned long long int ULLINF = numeric_limits<unsigned long long int>::max();
int main() {
LL a, b, c, d;
cin >> a >> b >> c >> d;
VLL ans(4,0);
ans[0] = a * c;
ans[1] = b * d;
ans[2] = a * d;
ans[3] = b * c;
LL anss;
anss = max(ans[0], ans[1]);
anss = max(anss, ans[2]);
anss = max(anss, ans[3]);
cout << anss << endl;
return 0;
}
|
/**
* Lists the reports that are completed and amendable.
*
* @return the list
*/
public List<Report> findCompletedAmendableReports(){
List<Report> completedReports = listReportsHavingStatus(ReportStatus.COMPLETED);
List<Report> amendableReports = new ArrayList<Report>();
for(Report report : completedReports){
if(report.isAmendable()) amendableReports.add(report);
}
return amendableReports;
} |
A randomized, double-blind, controlled trial on non-opioid analgesics and opioid consumption for postoperative pain relief after laparoscopic cholecystectomy.
BACKGROUND
Following laparoscopic cholecystectomy, effective post-operative pain control is necessary, at least during the first 24 hours. We present a randomized, double-blind trial on the effect of intravenous parecoxib, metamizol, or paracetamol, given in addition to piritramide via a patient-controlled analgesia (PCA) pump, on piritramide consumption in patients recovering from laparoscopic cholecystectomy.
METHODS
120 patients were randomly allocated to four groups treated with normal saline or one of the non-opioid analgesics (parecoxib 40 mg twice daily, metamizol 1 g three times daily, or paracetamol 1 g three times daily) in addition to piritramide via the PCA pump. Beginning in the post-anesthesia care unit (PACU), patients were asked every 2 h for 6 hours and afterwards once every 6 h to quantify their pain at rest, while piritramide consumption was recorded.
RESULTS
In all groups, piritramide consumption was high in the PACU. Only metamizol significantly reduced piritramide consumption compared to the other treatments at discharge from the PACU. Overall, cumulative piritramide consumption was slightly lower in the metamizol group and higher in the NaCl group; however, these differences were not statistically significant. VAS scores were highest upon arrival in the PACU and dropped almost continuously after surgery. Significantly lower postoperative pain intensity was found only in the parecoxib group at 24 h after surgery, compared to the metamizol group.
CONCLUSION
The efficacy of the tested additive medications on piritramide consumption and pain relief is weak, and there is no clear-cut difference between the non-opioid drugs used. |
"""
Adobe Experience Manager (AEM) API
Swagger AEM is an OpenAPI specification for Adobe Experience Manager (AEM) API # noqa: E501
The version of the OpenAPI document: 3.5.0-pre.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from swaggeraem.api_client import ApiClient, Endpoint
from swaggeraem.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from swaggeraem.model.keystore_info import KeystoreInfo
from swaggeraem.model.truststore_info import TruststoreInfo
class SlingApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __delete_agent(
self,
runmode,
name,
**kwargs
):
"""delete_agent # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_agent(runmode, name, async_req=True)
>>> result = thread.get()
Args:
runmode (str):
name (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['runmode'] = \
runmode
kwargs['name'] = \
name
return self.call_with_http_info(**kwargs)
self.delete_agent = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/etc/replication/agents.{runmode}/{name}',
'operation_id': 'delete_agent',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'runmode',
'name',
],
'required': [
'runmode',
'name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'runmode':
(str,),
'name':
(str,),
},
'attribute_map': {
'runmode': 'runmode',
'name': 'name',
},
'location_map': {
'runmode': 'path',
'name': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__delete_agent
)
def __delete_node(
self,
path,
name,
**kwargs
):
"""delete_node # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_node(path, name, async_req=True)
>>> result = thread.get()
Args:
path (str):
name (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['path'] = \
path
kwargs['name'] = \
name
return self.call_with_http_info(**kwargs)
self.delete_node = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/{path}/{name}',
'operation_id': 'delete_node',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'path',
'name',
],
'required': [
'path',
'name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'path':
(str,),
'name':
(str,),
},
'attribute_map': {
'path': 'path',
'name': 'name',
},
'location_map': {
'path': 'path',
'name': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__delete_node
)
def __get_agent(
self,
runmode,
name,
**kwargs
):
"""get_agent # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_agent(runmode, name, async_req=True)
>>> result = thread.get()
Args:
runmode (str):
name (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['runmode'] = \
runmode
kwargs['name'] = \
name
return self.call_with_http_info(**kwargs)
self.get_agent = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/etc/replication/agents.{runmode}/{name}',
'operation_id': 'get_agent',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'runmode',
'name',
],
'required': [
'runmode',
'name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'runmode':
(str,),
'name':
(str,),
},
'attribute_map': {
'runmode': 'runmode',
'name': 'name',
},
'location_map': {
'runmode': 'path',
'name': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__get_agent
)
def __get_agents(
self,
runmode,
**kwargs
):
"""get_agents # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_agents(runmode, async_req=True)
>>> result = thread.get()
Args:
runmode (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['runmode'] = \
runmode
return self.call_with_http_info(**kwargs)
self.get_agents = Endpoint(
settings={
'response_type': (str,),
'auth': [
'aemAuth'
],
'endpoint_path': '/etc/replication/agents.{runmode}.-1.json',
'operation_id': 'get_agents',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'runmode',
],
'required': [
'runmode',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'runmode':
(str,),
},
'attribute_map': {
'runmode': 'runmode',
},
'location_map': {
'runmode': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_agents
)
def __get_authorizable_keystore(
self,
intermediate_path,
authorizable_id,
**kwargs
):
"""get_authorizable_keystore # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_authorizable_keystore(intermediate_path, authorizable_id, async_req=True)
>>> result = thread.get()
Args:
intermediate_path (str):
authorizable_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
KeystoreInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['intermediate_path'] = \
intermediate_path
kwargs['authorizable_id'] = \
authorizable_id
return self.call_with_http_info(**kwargs)
self.get_authorizable_keystore = Endpoint(
settings={
'response_type': (KeystoreInfo,),
'auth': [
'aemAuth'
],
'endpoint_path': '/{intermediatePath}/{authorizableId}.ks.json',
'operation_id': 'get_authorizable_keystore',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'intermediate_path',
'authorizable_id',
],
'required': [
'intermediate_path',
'authorizable_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'intermediate_path':
(str,),
'authorizable_id':
(str,),
},
'attribute_map': {
'intermediate_path': 'intermediatePath',
'authorizable_id': 'authorizableId',
},
'location_map': {
'intermediate_path': 'path',
'authorizable_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/plain'
],
'content_type': [],
},
api_client=api_client,
callable=__get_authorizable_keystore
)
def __get_keystore(
self,
intermediate_path,
authorizable_id,
**kwargs
):
"""get_keystore # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_keystore(intermediate_path, authorizable_id, async_req=True)
>>> result = thread.get()
Args:
intermediate_path (str):
authorizable_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['intermediate_path'] = \
intermediate_path
kwargs['authorizable_id'] = \
authorizable_id
return self.call_with_http_info(**kwargs)
self.get_keystore = Endpoint(
settings={
'response_type': (file_type,),
'auth': [
'aemAuth'
],
'endpoint_path': '/{intermediatePath}/{authorizableId}/keystore/store.p12',
'operation_id': 'get_keystore',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'intermediate_path',
'authorizable_id',
],
'required': [
'intermediate_path',
'authorizable_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'intermediate_path':
(str,),
'authorizable_id':
(str,),
},
'attribute_map': {
'intermediate_path': 'intermediatePath',
'authorizable_id': 'authorizableId',
},
'location_map': {
'intermediate_path': 'path',
'authorizable_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/octet-stream'
],
'content_type': [],
},
api_client=api_client,
callable=__get_keystore
)
def __get_node(
self,
path,
name,
**kwargs
):
"""get_node # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_node(path, name, async_req=True)
>>> result = thread.get()
Args:
path (str):
name (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['path'] = \
path
kwargs['name'] = \
name
return self.call_with_http_info(**kwargs)
self.get_node = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/{path}/{name}',
'operation_id': 'get_node',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'path',
'name',
],
'required': [
'path',
'name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'path':
(str,),
'name':
(str,),
},
'attribute_map': {
'path': 'path',
'name': 'name',
},
'location_map': {
'path': 'path',
'name': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__get_node
)
def __get_package(
self,
group,
name,
version,
**kwargs
):
"""get_package # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_package(group, name, version, async_req=True)
>>> result = thread.get()
Args:
group (str):
name (str):
version (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['group'] = \
group
kwargs['name'] = \
name
kwargs['version'] = \
version
return self.call_with_http_info(**kwargs)
self.get_package = Endpoint(
settings={
'response_type': (file_type,),
'auth': [
'aemAuth'
],
'endpoint_path': '/etc/packages/{group}/{name}-{version}.zip',
'operation_id': 'get_package',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'group',
'name',
'version',
],
'required': [
'group',
'name',
'version',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'group':
(str,),
'name':
(str,),
'version':
(str,),
},
'attribute_map': {
'group': 'group',
'name': 'name',
'version': 'version',
},
'location_map': {
'group': 'path',
'name': 'path',
'version': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/octet-stream'
],
'content_type': [],
},
api_client=api_client,
callable=__get_package
)
def __get_package_filter(
self,
group,
name,
version,
**kwargs
):
"""get_package_filter # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_package_filter(group, name, version, async_req=True)
>>> result = thread.get()
Args:
group (str):
name (str):
version (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['group'] = \
group
kwargs['name'] = \
name
kwargs['version'] = \
version
return self.call_with_http_info(**kwargs)
self.get_package_filter = Endpoint(
settings={
'response_type': (str,),
'auth': [
'aemAuth'
],
'endpoint_path': '/etc/packages/{group}/{name}-{version}.zip/jcr:content/vlt:definition/filter.tidy.2.json',
'operation_id': 'get_package_filter',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'group',
'name',
'version',
],
'required': [
'group',
'name',
'version',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'group':
(str,),
'name':
(str,),
'version':
(str,),
},
'attribute_map': {
'group': 'group',
'name': 'name',
'version': 'version',
},
'location_map': {
'group': 'path',
'name': 'path',
'version': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_package_filter
)
def __get_query(
self,
path,
p_limit,
_1_property,
_1_property_value,
**kwargs
):
"""get_query # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_query(path, p_limit, _1_property, _1_property_value, async_req=True)
>>> result = thread.get()
Args:
path (str):
p_limit (float):
_1_property (str):
_1_property_value (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['path'] = \
path
kwargs['p_limit'] = \
p_limit
kwargs['_1_property'] = \
_1_property
kwargs['_1_property_value'] = \
_1_property_value
return self.call_with_http_info(**kwargs)
self.get_query = Endpoint(
settings={
'response_type': (str,),
'auth': [
'aemAuth'
],
'endpoint_path': '/bin/querybuilder.json',
'operation_id': 'get_query',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'path',
'p_limit',
'_1_property',
'_1_property_value',
],
'required': [
'path',
'p_limit',
'_1_property',
'_1_property_value',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'path':
(str,),
'p_limit':
(float,),
'_1_property':
(str,),
'_1_property_value':
(str,),
},
'attribute_map': {
'path': 'path',
'p_limit': 'p.limit',
'_1_property': '1_property',
'_1_property_value': '1_property.value',
},
'location_map': {
'path': 'query',
'p_limit': 'query',
'_1_property': 'query',
'_1_property_value': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_query
)
def __get_truststore(
self,
**kwargs
):
"""get_truststore # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_truststore(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_truststore = Endpoint(
settings={
'response_type': (file_type,),
'auth': [
'aemAuth'
],
'endpoint_path': '/etc/truststore/truststore.p12',
'operation_id': 'get_truststore',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/octet-stream'
],
'content_type': [],
},
api_client=api_client,
callable=__get_truststore
)
def __get_truststore_info(
self,
**kwargs
):
"""get_truststore_info # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_truststore_info(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
TruststoreInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_truststore_info = Endpoint(
settings={
'response_type': (TruststoreInfo,),
'auth': [
'aemAuth'
],
'endpoint_path': '/libs/granite/security/truststore.json',
'operation_id': 'get_truststore_info',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_truststore_info
)
def __post_agent(
self,
runmode,
name,
**kwargs
):
"""post_agent # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_agent(runmode, name, async_req=True)
>>> result = thread.get()
Args:
runmode (str):
name (str):
Keyword Args:
jcrcontent_cqdistribute (bool): [optional]
jcrcontent_cqdistribute_type_hint (str): [optional]
jcrcontent_cqname (str): [optional]
jcrcontent_cqtemplate (str): [optional]
jcrcontent_enabled (bool): [optional]
jcrcontent_jcrdescription (str): [optional]
jcrcontent_jcrlast_modified (str): [optional]
jcrcontent_jcrlast_modified_by (str): [optional]
jcrcontent_jcrmixin_types (str): [optional]
jcrcontent_jcrtitle (str): [optional]
jcrcontent_log_level (str): [optional]
jcrcontent_no_status_update (bool): [optional]
jcrcontent_no_versioning (bool): [optional]
jcrcontent_protocol_connect_timeout (float): [optional]
jcrcontent_protocol_http_connection_closed (bool): [optional]
jcrcontent_protocol_http_expired (str): [optional]
jcrcontent_protocol_http_headers ([str]): [optional]
jcrcontent_protocol_http_headers_type_hint (str): [optional]
jcrcontent_protocol_http_method (str): [optional]
jcrcontent_protocol_https_relaxed (bool): [optional]
jcrcontent_protocol_interface (str): [optional]
jcrcontent_protocol_socket_timeout (float): [optional]
jcrcontent_protocol_version (str): [optional]
jcrcontent_proxy_ntlm_domain (str): [optional]
jcrcontent_proxy_ntlm_host (str): [optional]
jcrcontent_proxy_host (str): [optional]
jcrcontent_proxy_password (str): [optional]
jcrcontent_proxy_port (float): [optional]
jcrcontent_proxy_user (str): [optional]
jcrcontent_queue_batch_max_size (float): [optional]
jcrcontent_queue_batch_mode (str): [optional]
jcrcontent_queue_batch_wait_time (float): [optional]
jcrcontent_retry_delay (str): [optional]
jcrcontent_reverse_replication (bool): [optional]
jcrcontent_serialization_type (str): [optional]
jcrcontent_slingresource_type (str): [optional]
jcrcontent_ssl (str): [optional]
jcrcontent_transport_ntlm_domain (str): [optional]
jcrcontent_transport_ntlm_host (str): [optional]
jcrcontent_transport_password (str): [optional]
jcrcontent_transport_uri (str): [optional]
jcrcontent_transport_user (str): [optional]
jcrcontent_trigger_distribute (bool): [optional]
jcrcontent_trigger_modified (bool): [optional]
jcrcontent_trigger_on_off_time (bool): [optional]
jcrcontent_trigger_receive (bool): [optional]
jcrcontent_trigger_specific (bool): [optional]
jcrcontent_user_id (str): [optional]
jcrprimary_type (str): [optional]
operation (str): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['runmode'] = \
runmode
kwargs['name'] = \
name
return self.call_with_http_info(**kwargs)
self.post_agent = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/etc/replication/agents.{runmode}/{name}',
'operation_id': 'post_agent',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'runmode',
'name',
'jcrcontent_cqdistribute',
'jcrcontent_cqdistribute_type_hint',
'jcrcontent_cqname',
'jcrcontent_cqtemplate',
'jcrcontent_enabled',
'jcrcontent_jcrdescription',
'jcrcontent_jcrlast_modified',
'jcrcontent_jcrlast_modified_by',
'jcrcontent_jcrmixin_types',
'jcrcontent_jcrtitle',
'jcrcontent_log_level',
'jcrcontent_no_status_update',
'jcrcontent_no_versioning',
'jcrcontent_protocol_connect_timeout',
'jcrcontent_protocol_http_connection_closed',
'jcrcontent_protocol_http_expired',
'jcrcontent_protocol_http_headers',
'jcrcontent_protocol_http_headers_type_hint',
'jcrcontent_protocol_http_method',
'jcrcontent_protocol_https_relaxed',
'jcrcontent_protocol_interface',
'jcrcontent_protocol_socket_timeout',
'jcrcontent_protocol_version',
'jcrcontent_proxy_ntlm_domain',
'jcrcontent_proxy_ntlm_host',
'jcrcontent_proxy_host',
'jcrcontent_proxy_password',
'jcrcontent_proxy_port',
'jcrcontent_proxy_user',
'jcrcontent_queue_batch_max_size',
'jcrcontent_queue_batch_mode',
'jcrcontent_queue_batch_wait_time',
'jcrcontent_retry_delay',
'jcrcontent_reverse_replication',
'jcrcontent_serialization_type',
'jcrcontent_slingresource_type',
'jcrcontent_ssl',
'jcrcontent_transport_ntlm_domain',
'jcrcontent_transport_ntlm_host',
'jcrcontent_transport_password',
'jcrcontent_transport_uri',
'jcrcontent_transport_user',
'jcrcontent_trigger_distribute',
'jcrcontent_trigger_modified',
'jcrcontent_trigger_on_off_time',
'jcrcontent_trigger_receive',
'jcrcontent_trigger_specific',
'jcrcontent_user_id',
'jcrprimary_type',
'operation',
],
'required': [
'runmode',
'name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'runmode':
(str,),
'name':
(str,),
'jcrcontent_cqdistribute':
(bool,),
'jcrcontent_cqdistribute_type_hint':
(str,),
'jcrcontent_cqname':
(str,),
'jcrcontent_cqtemplate':
(str,),
'jcrcontent_enabled':
(bool,),
'jcrcontent_jcrdescription':
(str,),
'jcrcontent_jcrlast_modified':
(str,),
'jcrcontent_jcrlast_modified_by':
(str,),
'jcrcontent_jcrmixin_types':
(str,),
'jcrcontent_jcrtitle':
(str,),
'jcrcontent_log_level':
(str,),
'jcrcontent_no_status_update':
(bool,),
'jcrcontent_no_versioning':
(bool,),
'jcrcontent_protocol_connect_timeout':
(float,),
'jcrcontent_protocol_http_connection_closed':
(bool,),
'jcrcontent_protocol_http_expired':
(str,),
'jcrcontent_protocol_http_headers':
([str],),
'jcrcontent_protocol_http_headers_type_hint':
(str,),
'jcrcontent_protocol_http_method':
(str,),
'jcrcontent_protocol_https_relaxed':
(bool,),
'jcrcontent_protocol_interface':
(str,),
'jcrcontent_protocol_socket_timeout':
(float,),
'jcrcontent_protocol_version':
(str,),
'jcrcontent_proxy_ntlm_domain':
(str,),
'jcrcontent_proxy_ntlm_host':
(str,),
'jcrcontent_proxy_host':
(str,),
'jcrcontent_proxy_password':
(str,),
'jcrcontent_proxy_port':
(float,),
'jcrcontent_proxy_user':
(str,),
'jcrcontent_queue_batch_max_size':
(float,),
'jcrcontent_queue_batch_mode':
(str,),
'jcrcontent_queue_batch_wait_time':
(float,),
'jcrcontent_retry_delay':
(str,),
'jcrcontent_reverse_replication':
(bool,),
'jcrcontent_serialization_type':
(str,),
'jcrcontent_slingresource_type':
(str,),
'jcrcontent_ssl':
(str,),
'jcrcontent_transport_ntlm_domain':
(str,),
'jcrcontent_transport_ntlm_host':
(str,),
'jcrcontent_transport_password':
(str,),
'jcrcontent_transport_uri':
(str,),
'jcrcontent_transport_user':
(str,),
'jcrcontent_trigger_distribute':
(bool,),
'jcrcontent_trigger_modified':
(bool,),
'jcrcontent_trigger_on_off_time':
(bool,),
'jcrcontent_trigger_receive':
(bool,),
'jcrcontent_trigger_specific':
(bool,),
'jcrcontent_user_id':
(str,),
'jcrprimary_type':
(str,),
'operation':
(str,),
},
'attribute_map': {
'runmode': 'runmode',
'name': 'name',
'jcrcontent_cqdistribute': 'jcr:content/cq:distribute',
'jcrcontent_cqdistribute_type_hint': 'jcr:content/cq:distribute@TypeHint',
'jcrcontent_cqname': 'jcr:content/cq:name',
'jcrcontent_cqtemplate': 'jcr:content/cq:template',
'jcrcontent_enabled': 'jcr:content/enabled',
'jcrcontent_jcrdescription': 'jcr:content/jcr:description',
'jcrcontent_jcrlast_modified': 'jcr:content/jcr:lastModified',
'jcrcontent_jcrlast_modified_by': 'jcr:content/jcr:lastModifiedBy',
'jcrcontent_jcrmixin_types': 'jcr:content/jcr:mixinTypes',
'jcrcontent_jcrtitle': 'jcr:content/jcr:title',
'jcrcontent_log_level': 'jcr:content/logLevel',
'jcrcontent_no_status_update': 'jcr:content/noStatusUpdate',
'jcrcontent_no_versioning': 'jcr:content/noVersioning',
'jcrcontent_protocol_connect_timeout': 'jcr:content/protocolConnectTimeout',
'jcrcontent_protocol_http_connection_closed': 'jcr:content/protocolHTTPConnectionClosed',
'jcrcontent_protocol_http_expired': 'jcr:content/protocolHTTPExpired',
'jcrcontent_protocol_http_headers': 'jcr:content/protocolHTTPHeaders',
'jcrcontent_protocol_http_headers_type_hint': 'jcr:content/protocolHTTPHeaders@TypeHint',
'jcrcontent_protocol_http_method': 'jcr:content/protocolHTTPMethod',
'jcrcontent_protocol_https_relaxed': 'jcr:content/protocolHTTPSRelaxed',
'jcrcontent_protocol_interface': 'jcr:content/protocolInterface',
'jcrcontent_protocol_socket_timeout': 'jcr:content/protocolSocketTimeout',
'jcrcontent_protocol_version': 'jcr:content/protocolVersion',
'jcrcontent_proxy_ntlm_domain': 'jcr:content/proxyNTLMDomain',
'jcrcontent_proxy_ntlm_host': 'jcr:content/proxyNTLMHost',
'jcrcontent_proxy_host': 'jcr:content/proxyHost',
'jcrcontent_proxy_password': 'jcr:content/proxyPassword',
'jcrcontent_proxy_port': 'jcr:content/proxyPort',
'jcrcontent_proxy_user': 'jcr:content/proxyUser',
'jcrcontent_queue_batch_max_size': 'jcr:content/queueBatchMaxSize',
'jcrcontent_queue_batch_mode': 'jcr:content/queueBatchMode',
'jcrcontent_queue_batch_wait_time': 'jcr:content/queueBatchWaitTime',
'jcrcontent_retry_delay': 'jcr:content/retryDelay',
'jcrcontent_reverse_replication': 'jcr:content/reverseReplication',
'jcrcontent_serialization_type': 'jcr:content/serializationType',
'jcrcontent_slingresource_type': 'jcr:content/sling:resourceType',
'jcrcontent_ssl': 'jcr:content/ssl',
'jcrcontent_transport_ntlm_domain': 'jcr:content/transportNTLMDomain',
'jcrcontent_transport_ntlm_host': 'jcr:content/transportNTLMHost',
'jcrcontent_transport_password': 'jcr:content/transportPassword',
'jcrcontent_transport_uri': 'jcr:content/transportUri',
'jcrcontent_transport_user': 'jcr:content/transportUser',
'jcrcontent_trigger_distribute': 'jcr:content/triggerDistribute',
'jcrcontent_trigger_modified': 'jcr:content/triggerModified',
'jcrcontent_trigger_on_off_time': 'jcr:content/triggerOnOffTime',
'jcrcontent_trigger_receive': 'jcr:content/triggerReceive',
'jcrcontent_trigger_specific': 'jcr:content/triggerSpecific',
'jcrcontent_user_id': 'jcr:content/userId',
'jcrprimary_type': 'jcr:primaryType',
'operation': ':operation',
},
'location_map': {
'runmode': 'path',
'name': 'path',
'jcrcontent_cqdistribute': 'query',
'jcrcontent_cqdistribute_type_hint': 'query',
'jcrcontent_cqname': 'query',
'jcrcontent_cqtemplate': 'query',
'jcrcontent_enabled': 'query',
'jcrcontent_jcrdescription': 'query',
'jcrcontent_jcrlast_modified': 'query',
'jcrcontent_jcrlast_modified_by': 'query',
'jcrcontent_jcrmixin_types': 'query',
'jcrcontent_jcrtitle': 'query',
'jcrcontent_log_level': 'query',
'jcrcontent_no_status_update': 'query',
'jcrcontent_no_versioning': 'query',
'jcrcontent_protocol_connect_timeout': 'query',
'jcrcontent_protocol_http_connection_closed': 'query',
'jcrcontent_protocol_http_expired': 'query',
'jcrcontent_protocol_http_headers': 'query',
'jcrcontent_protocol_http_headers_type_hint': 'query',
'jcrcontent_protocol_http_method': 'query',
'jcrcontent_protocol_https_relaxed': 'query',
'jcrcontent_protocol_interface': 'query',
'jcrcontent_protocol_socket_timeout': 'query',
'jcrcontent_protocol_version': 'query',
'jcrcontent_proxy_ntlm_domain': 'query',
'jcrcontent_proxy_ntlm_host': 'query',
'jcrcontent_proxy_host': 'query',
'jcrcontent_proxy_password': 'query',
'jcrcontent_proxy_port': 'query',
'jcrcontent_proxy_user': 'query',
'jcrcontent_queue_batch_max_size': 'query',
'jcrcontent_queue_batch_mode': 'query',
'jcrcontent_queue_batch_wait_time': 'query',
'jcrcontent_retry_delay': 'query',
'jcrcontent_reverse_replication': 'query',
'jcrcontent_serialization_type': 'query',
'jcrcontent_slingresource_type': 'query',
'jcrcontent_ssl': 'query',
'jcrcontent_transport_ntlm_domain': 'query',
'jcrcontent_transport_ntlm_host': 'query',
'jcrcontent_transport_password': 'query',
'jcrcontent_transport_uri': 'query',
'jcrcontent_transport_user': 'query',
'jcrcontent_trigger_distribute': 'query',
'jcrcontent_trigger_modified': 'query',
'jcrcontent_trigger_on_off_time': 'query',
'jcrcontent_trigger_receive': 'query',
'jcrcontent_trigger_specific': 'query',
'jcrcontent_user_id': 'query',
'jcrprimary_type': 'query',
'operation': 'query',
},
'collection_format_map': {
'jcrcontent_protocol_http_headers': 'multi',
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_agent
)
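# Usage sketch (illustrative only, not part of the generated client): with `api`
# bound to a configured api_client, a replication agent node could be created or
# updated with a call like the commented one below. Every value is a placeholder
# and the transport URI format is an assumption, not taken from this file.
#
#   api.post_agent(runmode='author', name='publish-example',
#                  jcrcontent_jcrtitle='Example publish agent',
#                  jcrcontent_transport_uri='http://publish.example.com:4503/bin/receive',
#                  jcrcontent_enabled=True)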
def __post_authorizable_keystore(
self,
intermediate_path,
authorizable_id,
**kwargs
):
"""post_authorizable_keystore # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_authorizable_keystore(intermediate_path, authorizable_id, async_req=True)
>>> result = thread.get()
Args:
intermediate_path (str):
authorizable_id (str):
Keyword Args:
operation (str): [optional]
current_password (str): [optional]
new_password (str): [optional]
re_password (str): [optional]
key_password (str): [optional]
key_store_pass (str): [optional]
alias (str): [optional]
new_alias (str): [optional]
remove_alias (str): [optional]
cert_chain (file_type): [optional]
pk (file_type): [optional]
key_store (file_type): [optional]
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
KeystoreInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['intermediate_path'] = \
intermediate_path
kwargs['authorizable_id'] = \
authorizable_id
return self.call_with_http_info(**kwargs)
self.post_authorizable_keystore = Endpoint(
settings={
'response_type': (KeystoreInfo,),
'auth': [
'aemAuth'
],
'endpoint_path': '/{intermediatePath}/{authorizableId}.ks.html',
'operation_id': 'post_authorizable_keystore',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'intermediate_path',
'authorizable_id',
'operation',
'current_password',
'new_password',
're_password',
'key_password',
'key_store_pass',
'alias',
'new_alias',
'remove_alias',
'cert_chain',
'pk',
'key_store',
],
'required': [
'intermediate_path',
'authorizable_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'intermediate_path':
(str,),
'authorizable_id':
(str,),
'operation':
(str,),
'current_password':
(str,),
'new_password':
(str,),
're_password':
(str,),
'key_password':
(str,),
'key_store_pass':
(str,),
'alias':
(str,),
'new_alias':
(str,),
'remove_alias':
(str,),
'cert_chain':
(file_type,),
'pk':
(file_type,),
'key_store':
(file_type,),
},
'attribute_map': {
'intermediate_path': 'intermediatePath',
'authorizable_id': 'authorizableId',
'operation': ':operation',
'current_password': 'currentPassword',
'new_password': 'newPassword',
're_password': 'rePassword',
'key_password': 'keyPassword',
'key_store_pass': 'keyStorePass',
'alias': 'alias',
'new_alias': 'newAlias',
'remove_alias': 'removeAlias',
'cert_chain': 'cert-chain',
'pk': 'pk',
'key_store': 'keyStore',
},
'location_map': {
'intermediate_path': 'path',
'authorizable_id': 'path',
'operation': 'query',
'current_password': 'query',
'new_password': 'query',
're_password': 'query',
'key_password': 'query',
'key_store_pass': 'query',
'alias': 'query',
'new_alias': 'query',
'remove_alias': 'query',
'cert_chain': 'form',
'pk': 'form',
'key_store': 'form',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/plain'
],
'content_type': [
'multipart/form-data'
]
},
api_client=api_client,
callable=__post_authorizable_keystore
)
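# Usage sketch (illustrative only): assuming `api` is an instance of this class,
# a synchronous keystore update could look like the commented call below. The
# argument values are placeholders and the 'createStore' operation name is an
# assumption, not defined in this file.
#
#   keystore_info = api.post_authorizable_keystore(
#       intermediate_path='home/users/x',
#       authorizable_id='exampleUser',
#       operation='createStore',
#       new_password='<new password>',
#       re_password='<new password>')
#   # With _return_http_data_only at its default, this returns a KeystoreInfo model.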
def __post_authorizables(
self,
authorizable_id,
intermediate_path,
**kwargs
):
"""post_authorizables # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_authorizables(authorizable_id, intermediate_path, async_req=True)
>>> result = thread.get()
Args:
authorizable_id (str):
intermediate_path (str):
Keyword Args:
create_user (str): [optional]
create_group (str): [optional]
reppassword (str): [optional]
profile_given_name (str): [optional]
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['authorizable_id'] = \
authorizable_id
kwargs['intermediate_path'] = \
intermediate_path
return self.call_with_http_info(**kwargs)
self.post_authorizables = Endpoint(
settings={
'response_type': (str,),
'auth': [
'aemAuth'
],
'endpoint_path': '/libs/granite/security/post/authorizables',
'operation_id': 'post_authorizables',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'authorizable_id',
'intermediate_path',
'create_user',
'create_group',
'reppassword',
'profile_given_name',
],
'required': [
'authorizable_id',
'intermediate_path',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'authorizable_id':
(str,),
'intermediate_path':
(str,),
'create_user':
(str,),
'create_group':
(str,),
'reppassword':
(str,),
'profile_given_name':
(str,),
},
'attribute_map': {
'authorizable_id': 'authorizableId',
'intermediate_path': 'intermediatePath',
'create_user': 'createUser',
'create_group': 'createGroup',
'reppassword': 'rep:password',
'profile_given_name': 'profile/givenName',
},
'location_map': {
'authorizable_id': 'query',
'intermediate_path': 'query',
'create_user': 'query',
'create_group': 'query',
'reppassword': 'query',
'profile_given_name': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/html'
],
'content_type': [],
},
api_client=api_client,
callable=__post_authorizables
)
def __post_config_adobe_granite_saml_authentication_handler(
self,
**kwargs
):
"""post_config_adobe_granite_saml_authentication_handler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_config_adobe_granite_saml_authentication_handler(async_req=True)
>>> result = thread.get()
Keyword Args:
key_store_password (str): [optional]
key_store_password_type_hint (str): [optional]
service_ranking (int): [optional]
service_ranking_type_hint (str): [optional]
idp_http_redirect (bool): [optional]
idp_http_redirect_type_hint (str): [optional]
create_user (bool): [optional]
create_user_type_hint (str): [optional]
default_redirect_url (str): [optional]
default_redirect_url_type_hint (str): [optional]
user_id_attribute (str): [optional]
user_id_attribute_type_hint (str): [optional]
default_groups ([str]): [optional]
default_groups_type_hint (str): [optional]
idp_cert_alias (str): [optional]
idp_cert_alias_type_hint (str): [optional]
add_group_memberships (bool): [optional]
add_group_memberships_type_hint (str): [optional]
path ([str]): [optional]
path_type_hint (str): [optional]
synchronize_attributes ([str]): [optional]
synchronize_attributes_type_hint (str): [optional]
clock_tolerance (int): [optional]
clock_tolerance_type_hint (str): [optional]
group_membership_attribute (str): [optional]
group_membership_attribute_type_hint (str): [optional]
idp_url (str): [optional]
idp_url_type_hint (str): [optional]
logout_url (str): [optional]
logout_url_type_hint (str): [optional]
service_provider_entity_id (str): [optional]
service_provider_entity_id_type_hint (str): [optional]
assertion_consumer_service_url (str): [optional]
assertion_consumer_service_url_type_hint (str): [optional]
handle_logout (bool): [optional]
handle_logout_type_hint (str): [optional]
sp_private_key_alias (str): [optional]
sp_private_key_alias_type_hint (str): [optional]
use_encryption (bool): [optional]
use_encryption_type_hint (str): [optional]
name_id_format (str): [optional]
name_id_format_type_hint (str): [optional]
digest_method (str): [optional]
digest_method_type_hint (str): [optional]
signature_method (str): [optional]
signature_method_type_hint (str): [optional]
user_intermediate_path (str): [optional]
user_intermediate_path_type_hint (str): [optional]
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.post_config_adobe_granite_saml_authentication_handler = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/apps/system/config/com.adobe.granite.auth.saml.SamlAuthenticationHandler.config',
'operation_id': 'post_config_adobe_granite_saml_authentication_handler',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'key_store_password',
'key_store_password_type_hint',
'service_ranking',
'service_ranking_type_hint',
'idp_http_redirect',
'idp_http_redirect_type_hint',
'create_user',
'create_user_type_hint',
'default_redirect_url',
'default_redirect_url_type_hint',
'user_id_attribute',
'user_id_attribute_type_hint',
'default_groups',
'default_groups_type_hint',
'idp_cert_alias',
'idp_cert_alias_type_hint',
'add_group_memberships',
'add_group_memberships_type_hint',
'path',
'path_type_hint',
'synchronize_attributes',
'synchronize_attributes_type_hint',
'clock_tolerance',
'clock_tolerance_type_hint',
'group_membership_attribute',
'group_membership_attribute_type_hint',
'idp_url',
'idp_url_type_hint',
'logout_url',
'logout_url_type_hint',
'service_provider_entity_id',
'service_provider_entity_id_type_hint',
'assertion_consumer_service_url',
'assertion_consumer_service_url_type_hint',
'handle_logout',
'handle_logout_type_hint',
'sp_private_key_alias',
'sp_private_key_alias_type_hint',
'use_encryption',
'use_encryption_type_hint',
'name_id_format',
'name_id_format_type_hint',
'digest_method',
'digest_method_type_hint',
'signature_method',
'signature_method_type_hint',
'user_intermediate_path',
'user_intermediate_path_type_hint',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'key_store_password':
(str,),
'key_store_password_type_hint':
(str,),
'service_ranking':
(int,),
'service_ranking_type_hint':
(str,),
'idp_http_redirect':
(bool,),
'idp_http_redirect_type_hint':
(str,),
'create_user':
(bool,),
'create_user_type_hint':
(str,),
'default_redirect_url':
(str,),
'default_redirect_url_type_hint':
(str,),
'user_id_attribute':
(str,),
'user_id_attribute_type_hint':
(str,),
'default_groups':
([str],),
'default_groups_type_hint':
(str,),
'idp_cert_alias':
(str,),
'idp_cert_alias_type_hint':
(str,),
'add_group_memberships':
(bool,),
'add_group_memberships_type_hint':
(str,),
'path':
([str],),
'path_type_hint':
(str,),
'synchronize_attributes':
([str],),
'synchronize_attributes_type_hint':
(str,),
'clock_tolerance':
(int,),
'clock_tolerance_type_hint':
(str,),
'group_membership_attribute':
(str,),
'group_membership_attribute_type_hint':
(str,),
'idp_url':
(str,),
'idp_url_type_hint':
(str,),
'logout_url':
(str,),
'logout_url_type_hint':
(str,),
'service_provider_entity_id':
(str,),
'service_provider_entity_id_type_hint':
(str,),
'assertion_consumer_service_url':
(str,),
'assertion_consumer_service_url_type_hint':
(str,),
'handle_logout':
(bool,),
'handle_logout_type_hint':
(str,),
'sp_private_key_alias':
(str,),
'sp_private_key_alias_type_hint':
(str,),
'use_encryption':
(bool,),
'use_encryption_type_hint':
(str,),
'name_id_format':
(str,),
'name_id_format_type_hint':
(str,),
'digest_method':
(str,),
'digest_method_type_hint':
(str,),
'signature_method':
(str,),
'signature_method_type_hint':
(str,),
'user_intermediate_path':
(str,),
'user_intermediate_path_type_hint':
(str,),
},
'attribute_map': {
'key_store_password': 'keyStorePassword',
'key_store_password_type_hint': 'keyStorePassword@TypeHint',
'service_ranking': 'service.ranking',
'service_ranking_type_hint': 'service.ranking@TypeHint',
'idp_http_redirect': 'idpHttpRedirect',
'idp_http_redirect_type_hint': 'idpHttpRedirect@TypeHint',
'create_user': 'createUser',
'create_user_type_hint': 'createUser@TypeHint',
'default_redirect_url': 'defaultRedirectUrl',
'default_redirect_url_type_hint': 'defaultRedirectUrl@TypeHint',
'user_id_attribute': 'userIDAttribute',
'user_id_attribute_type_hint': 'userIDAttribute@TypeHint',
'default_groups': 'defaultGroups',
'default_groups_type_hint': 'defaultGroups@TypeHint',
'idp_cert_alias': 'idpCertAlias',
'idp_cert_alias_type_hint': 'idpCertAlias@TypeHint',
'add_group_memberships': 'addGroupMemberships',
'add_group_memberships_type_hint': 'addGroupMemberships@TypeHint',
'path': 'path',
'path_type_hint': 'path@TypeHint',
'synchronize_attributes': 'synchronizeAttributes',
'synchronize_attributes_type_hint': 'synchronizeAttributes@TypeHint',
'clock_tolerance': 'clockTolerance',
'clock_tolerance_type_hint': 'clockTolerance@TypeHint',
'group_membership_attribute': 'groupMembershipAttribute',
'group_membership_attribute_type_hint': 'groupMembershipAttribute@TypeHint',
'idp_url': 'idpUrl',
'idp_url_type_hint': 'idpUrl@TypeHint',
'logout_url': 'logoutUrl',
'logout_url_type_hint': 'logoutUrl@TypeHint',
'service_provider_entity_id': 'serviceProviderEntityId',
'service_provider_entity_id_type_hint': 'serviceProviderEntityId@TypeHint',
'assertion_consumer_service_url': 'assertionConsumerServiceURL',
'assertion_consumer_service_url_type_hint': 'assertionConsumerServiceURL@TypeHint',
'handle_logout': 'handleLogout',
'handle_logout_type_hint': 'handleLogout@TypeHint',
'sp_private_key_alias': 'spPrivateKeyAlias',
'sp_private_key_alias_type_hint': 'spPrivateKeyAlias@TypeHint',
'use_encryption': 'useEncryption',
'use_encryption_type_hint': 'useEncryption@TypeHint',
'name_id_format': 'nameIdFormat',
'name_id_format_type_hint': 'nameIdFormat@TypeHint',
'digest_method': 'digestMethod',
'digest_method_type_hint': 'digestMethod@TypeHint',
'signature_method': 'signatureMethod',
'signature_method_type_hint': 'signatureMethod@TypeHint',
'user_intermediate_path': 'userIntermediatePath',
'user_intermediate_path_type_hint': 'userIntermediatePath@TypeHint',
},
'location_map': {
'key_store_password': 'query',
'key_store_password_type_hint': 'query',
'service_ranking': 'query',
'service_ranking_type_hint': 'query',
'idp_http_redirect': 'query',
'idp_http_redirect_type_hint': 'query',
'create_user': 'query',
'create_user_type_hint': 'query',
'default_redirect_url': 'query',
'default_redirect_url_type_hint': 'query',
'user_id_attribute': 'query',
'user_id_attribute_type_hint': 'query',
'default_groups': 'query',
'default_groups_type_hint': 'query',
'idp_cert_alias': 'query',
'idp_cert_alias_type_hint': 'query',
'add_group_memberships': 'query',
'add_group_memberships_type_hint': 'query',
'path': 'query',
'path_type_hint': 'query',
'synchronize_attributes': 'query',
'synchronize_attributes_type_hint': 'query',
'clock_tolerance': 'query',
'clock_tolerance_type_hint': 'query',
'group_membership_attribute': 'query',
'group_membership_attribute_type_hint': 'query',
'idp_url': 'query',
'idp_url_type_hint': 'query',
'logout_url': 'query',
'logout_url_type_hint': 'query',
'service_provider_entity_id': 'query',
'service_provider_entity_id_type_hint': 'query',
'assertion_consumer_service_url': 'query',
'assertion_consumer_service_url_type_hint': 'query',
'handle_logout': 'query',
'handle_logout_type_hint': 'query',
'sp_private_key_alias': 'query',
'sp_private_key_alias_type_hint': 'query',
'use_encryption': 'query',
'use_encryption_type_hint': 'query',
'name_id_format': 'query',
'name_id_format_type_hint': 'query',
'digest_method': 'query',
'digest_method_type_hint': 'query',
'signature_method': 'query',
'signature_method_type_hint': 'query',
'user_intermediate_path': 'query',
'user_intermediate_path_type_hint': 'query',
},
'collection_format_map': {
'default_groups': 'multi',
'path': 'multi',
'synchronize_attributes': 'multi',
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_config_adobe_granite_saml_authentication_handler
)
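# Usage sketch (illustrative only): the SAML handler settings are sent as query
# parameters; the URLs and attribute names below are placeholders for a
# hypothetical IdP, not defaults shipped with the product.
#
#   api.post_config_adobe_granite_saml_authentication_handler(
#       idp_url='https://idp.example.com/saml/sso',
#       service_provider_entity_id='https://aem.example.com',
#       assertion_consumer_service_url='https://aem.example.com/saml_login',
#       create_user=True,
#       user_id_attribute='uid')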
def __post_config_apache_felix_jetty_based_http_service(
self,
**kwargs
):
"""post_config_apache_felix_jetty_based_http_service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_config_apache_felix_jetty_based_http_service(async_req=True)
>>> result = thread.get()
Keyword Args:
org_apache_felix_https_nio (bool): [optional]
org_apache_felix_https_nio_type_hint (str): [optional]
org_apache_felix_https_keystore (str): [optional]
org_apache_felix_https_keystore_type_hint (str): [optional]
org_apache_felix_https_keystore_password (str): [optional]
org_apache_felix_https_keystore_password_type_hint (str): [optional]
org_apache_felix_https_keystore_key (str): [optional]
org_apache_felix_https_keystore_key_type_hint (str): [optional]
org_apache_felix_https_keystore_key_password (str): [optional]
org_apache_felix_https_keystore_key_password_type_hint (str): [optional]
org_apache_felix_https_truststore (str): [optional]
org_apache_felix_https_truststore_type_hint (str): [optional]
org_apache_felix_https_truststore_password (str): [optional]
org_apache_felix_https_truststore_password_type_hint (str): [optional]
org_apache_felix_https_clientcertificate (str): [optional]
org_apache_felix_https_clientcertificate_type_hint (str): [optional]
org_apache_felix_https_enable (bool): [optional]
org_apache_felix_https_enable_type_hint (str): [optional]
org_osgi_service_http_port_secure (str): [optional]
org_osgi_service_http_port_secure_type_hint (str): [optional]
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.post_config_apache_felix_jetty_based_http_service = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/apps/system/config/org.apache.felix.http',
'operation_id': 'post_config_apache_felix_jetty_based_http_service',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'org_apache_felix_https_nio',
'org_apache_felix_https_nio_type_hint',
'org_apache_felix_https_keystore',
'org_apache_felix_https_keystore_type_hint',
'org_apache_felix_https_keystore_password',
'org_apache_felix_https_keystore_password_type_hint',
'org_apache_felix_https_keystore_key',
'org_apache_felix_https_keystore_key_type_hint',
'org_apache_felix_https_keystore_key_password',
'org_apache_felix_https_keystore_key_password_type_hint',
'org_apache_felix_https_truststore',
'org_apache_felix_https_truststore_type_hint',
'org_apache_felix_https_truststore_password',
'org_apache_felix_https_truststore_password_type_hint',
'org_apache_felix_https_clientcertificate',
'org_apache_felix_https_clientcertificate_type_hint',
'org_apache_felix_https_enable',
'org_apache_felix_https_enable_type_hint',
'org_osgi_service_http_port_secure',
'org_osgi_service_http_port_secure_type_hint',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'org_apache_felix_https_nio':
(bool,),
'org_apache_felix_https_nio_type_hint':
(str,),
'org_apache_felix_https_keystore':
(str,),
'org_apache_felix_https_keystore_type_hint':
(str,),
'org_apache_felix_https_keystore_password':
(str,),
'org_apache_felix_https_keystore_password_type_hint':
(str,),
'org_apache_felix_https_keystore_key':
(str,),
'org_apache_felix_https_keystore_key_type_hint':
(str,),
'org_apache_felix_https_keystore_key_password':
(str,),
'org_apache_felix_https_keystore_key_password_type_hint':
(str,),
'org_apache_felix_https_truststore':
(str,),
'org_apache_felix_https_truststore_type_hint':
(str,),
'org_apache_felix_https_truststore_password':
(str,),
'org_apache_felix_https_truststore_password_type_hint':
(str,),
'org_apache_felix_https_clientcertificate':
(str,),
'org_apache_felix_https_clientcertificate_type_hint':
(str,),
'org_apache_felix_https_enable':
(bool,),
'org_apache_felix_https_enable_type_hint':
(str,),
'org_osgi_service_http_port_secure':
(str,),
'org_osgi_service_http_port_secure_type_hint':
(str,),
},
'attribute_map': {
'org_apache_felix_https_nio': 'org.apache.felix.https.nio',
'org_apache_felix_https_nio_type_hint': 'org.apache.felix.https.nio@TypeHint',
'org_apache_felix_https_keystore': 'org.apache.felix.https.keystore',
'org_apache_felix_https_keystore_type_hint': 'org.apache.felix.https.keystore@TypeHint',
'org_apache_felix_https_keystore_password': 'org.apache.felix.https.keystore.password',
'org_apache_felix_https_keystore_password_type_hint': 'org.apache.felix.https.keystore.password@TypeHint',
'org_apache_felix_https_keystore_key': 'org.apache.felix.https.keystore.key',
'org_apache_felix_https_keystore_key_type_hint': 'org.apache.felix.https.keystore.key@TypeHint',
'org_apache_felix_https_keystore_key_password': 'org.apache.felix.https.keystore.key.password',
'org_apache_felix_https_keystore_key_password_type_hint': 'org.apache.felix.https.keystore.key.password@TypeHint',
'org_apache_felix_https_truststore': 'org.apache.felix.https.truststore',
'org_apache_felix_https_truststore_type_hint': 'org.apache.felix.https.truststore@TypeHint',
'org_apache_felix_https_truststore_password': 'org.apache.felix.https.truststore.password',
'org_apache_felix_https_truststore_password_type_hint': 'org.apache.felix.https.truststore.password@TypeHint',
'org_apache_felix_https_clientcertificate': 'org.apache.felix.https.clientcertificate',
'org_apache_felix_https_clientcertificate_type_hint': 'org.apache.felix.https.clientcertificate@TypeHint',
'org_apache_felix_https_enable': 'org.apache.felix.https.enable',
'org_apache_felix_https_enable_type_hint': 'org.apache.felix.https.enable@TypeHint',
'org_osgi_service_http_port_secure': 'org.osgi.service.http.port.secure',
'org_osgi_service_http_port_secure_type_hint': 'org.osgi.service.http.port.secure@TypeHint',
},
'location_map': {
'org_apache_felix_https_nio': 'query',
'org_apache_felix_https_nio_type_hint': 'query',
'org_apache_felix_https_keystore': 'query',
'org_apache_felix_https_keystore_type_hint': 'query',
'org_apache_felix_https_keystore_password': 'query',
'org_apache_felix_https_keystore_password_type_hint': 'query',
'org_apache_felix_https_keystore_key': 'query',
'org_apache_felix_https_keystore_key_type_hint': 'query',
'org_apache_felix_https_keystore_key_password': 'query',
'org_apache_felix_https_keystore_key_password_type_hint': 'query',
'org_apache_felix_https_truststore': 'query',
'org_apache_felix_https_truststore_type_hint': 'query',
'org_apache_felix_https_truststore_password': 'query',
'org_apache_felix_https_truststore_password_type_hint': 'query',
'org_apache_felix_https_clientcertificate': 'query',
'org_apache_felix_https_clientcertificate_type_hint': 'query',
'org_apache_felix_https_enable': 'query',
'org_apache_felix_https_enable_type_hint': 'query',
'org_osgi_service_http_port_secure': 'query',
'org_osgi_service_http_port_secure_type_hint': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_config_apache_felix_jetty_based_http_service
)
def __post_config_apache_http_components_proxy_configuration(
self,
**kwargs
):
"""post_config_apache_http_components_proxy_configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_config_apache_http_components_proxy_configuration(async_req=True)
>>> result = thread.get()
Keyword Args:
proxy_host (str): [optional]
proxy_host_type_hint (str): [optional]
proxy_port (int): [optional]
proxy_port_type_hint (str): [optional]
proxy_exceptions ([str]): [optional]
proxy_exceptions_type_hint (str): [optional]
proxy_enabled (bool): [optional]
proxy_enabled_type_hint (str): [optional]
proxy_user (str): [optional]
proxy_user_type_hint (str): [optional]
proxy_password (str): [optional]
proxy_password_type_hint (str): [optional]
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.post_config_apache_http_components_proxy_configuration = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/apps/system/config/org.apache.http.proxyconfigurator.config',
'operation_id': 'post_config_apache_http_components_proxy_configuration',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'proxy_host',
'proxy_host_type_hint',
'proxy_port',
'proxy_port_type_hint',
'proxy_exceptions',
'proxy_exceptions_type_hint',
'proxy_enabled',
'proxy_enabled_type_hint',
'proxy_user',
'proxy_user_type_hint',
'proxy_password',
'proxy_password_type_hint',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'proxy_host':
(str,),
'proxy_host_type_hint':
(str,),
'proxy_port':
(int,),
'proxy_port_type_hint':
(str,),
'proxy_exceptions':
([str],),
'proxy_exceptions_type_hint':
(str,),
'proxy_enabled':
(bool,),
'proxy_enabled_type_hint':
(str,),
'proxy_user':
(str,),
'proxy_user_type_hint':
(str,),
'proxy_password':
(str,),
'proxy_password_type_hint':
(str,),
},
'attribute_map': {
'proxy_host': 'proxy.host',
'proxy_host_type_hint': 'proxy.host@TypeHint',
'proxy_port': 'proxy.port',
'proxy_port_type_hint': 'proxy.port@TypeHint',
'proxy_exceptions': 'proxy.exceptions',
'proxy_exceptions_type_hint': 'proxy.exceptions@TypeHint',
'proxy_enabled': 'proxy.enabled',
'proxy_enabled_type_hint': 'proxy.enabled@TypeHint',
'proxy_user': 'proxy.user',
'proxy_user_type_hint': 'proxy.user@TypeHint',
'proxy_password': 'proxy.password',
'proxy_password_type_hint': 'proxy.password@TypeHint',
},
'location_map': {
'proxy_host': 'query',
'proxy_host_type_hint': 'query',
'proxy_port': 'query',
'proxy_port_type_hint': 'query',
'proxy_exceptions': 'query',
'proxy_exceptions_type_hint': 'query',
'proxy_enabled': 'query',
'proxy_enabled_type_hint': 'query',
'proxy_user': 'query',
'proxy_user_type_hint': 'query',
'proxy_password': 'query',
'proxy_password_type_hint': 'query',
},
'collection_format_map': {
'proxy_exceptions': 'multi',
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_config_apache_http_components_proxy_configuration
)
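# Usage sketch (illustrative only): enabling an outbound HTTP proxy through this
# endpoint needs little more than host, port and the enabled flag; the values
# below are placeholders.
#
#   api.post_config_apache_http_components_proxy_configuration(
#       proxy_host='proxy.example.com',
#       proxy_port=8080,
#       proxy_enabled=True)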
def __post_config_apache_sling_dav_ex_servlet(
self,
**kwargs
):
"""post_config_apache_sling_dav_ex_servlet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_config_apache_sling_dav_ex_servlet(async_req=True)
>>> result = thread.get()
Keyword Args:
alias (str): [optional]
alias_type_hint (str): [optional]
dav_create_absolute_uri (bool): [optional]
dav_create_absolute_uri_type_hint (str): [optional]
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.post_config_apache_sling_dav_ex_servlet = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/apps/system/config/org.apache.sling.jcr.davex.impl.servlets.SlingDavExServlet',
'operation_id': 'post_config_apache_sling_dav_ex_servlet',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'alias',
'alias_type_hint',
'dav_create_absolute_uri',
'dav_create_absolute_uri_type_hint',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'alias':
(str,),
'alias_type_hint':
(str,),
'dav_create_absolute_uri':
(bool,),
'dav_create_absolute_uri_type_hint':
(str,),
},
'attribute_map': {
'alias': 'alias',
'alias_type_hint': 'alias@TypeHint',
'dav_create_absolute_uri': 'dav.create-absolute-uri',
'dav_create_absolute_uri_type_hint': 'dav.create-absolute-uri@TypeHint',
},
'location_map': {
'alias': 'query',
'alias_type_hint': 'query',
'dav_create_absolute_uri': 'query',
'dav_create_absolute_uri_type_hint': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_config_apache_sling_dav_ex_servlet
)
def __post_config_apache_sling_get_servlet(
self,
**kwargs
):
"""post_config_apache_sling_get_servlet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_config_apache_sling_get_servlet(async_req=True)
>>> result = thread.get()
Keyword Args:
json_maximumresults (str): [optional]
json_maximumresults_type_hint (str): [optional]
enable_html (bool): [optional]
enable_html_type_hint (str): [optional]
enable_txt (bool): [optional]
enable_txt_type_hint (str): [optional]
enable_xml (bool): [optional]
enable_xml_type_hint (str): [optional]
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.post_config_apache_sling_get_servlet = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/apps/system/config/org.apache.sling.servlets.get.DefaultGetServlet',
'operation_id': 'post_config_apache_sling_get_servlet',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'json_maximumresults',
'json_maximumresults_type_hint',
'enable_html',
'enable_html_type_hint',
'enable_txt',
'enable_txt_type_hint',
'enable_xml',
'enable_xml_type_hint',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'json_maximumresults':
(str,),
'json_maximumresults_type_hint':
(str,),
'enable_html':
(bool,),
'enable_html_type_hint':
(str,),
'enable_txt':
(bool,),
'enable_txt_type_hint':
(str,),
'enable_xml':
(bool,),
'enable_xml_type_hint':
(str,),
},
'attribute_map': {
'json_maximumresults': 'json.maximumresults',
'json_maximumresults_type_hint': 'json.maximumresults@TypeHint',
'enable_html': 'enable.html',
'enable_html_type_hint': 'enable.html@TypeHint',
'enable_txt': 'enable.txt',
'enable_txt_type_hint': 'enable.txt@TypeHint',
'enable_xml': 'enable.xml',
'enable_xml_type_hint': 'enable.xml@TypeHint',
},
'location_map': {
'json_maximumresults': 'query',
'json_maximumresults_type_hint': 'query',
'enable_html': 'query',
'enable_html_type_hint': 'query',
'enable_txt': 'query',
'enable_txt_type_hint': 'query',
'enable_xml': 'query',
'enable_xml_type_hint': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_config_apache_sling_get_servlet
)
def __post_config_apache_sling_referrer_filter(
self,
**kwargs
):
"""post_config_apache_sling_referrer_filter # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_config_apache_sling_referrer_filter(async_req=True)
>>> result = thread.get()
Keyword Args:
allow_empty (bool): [optional]
allow_empty_type_hint (str): [optional]
allow_hosts (str): [optional]
allow_hosts_type_hint (str): [optional]
allow_hosts_regexp (str): [optional]
allow_hosts_regexp_type_hint (str): [optional]
filter_methods (str): [optional]
filter_methods_type_hint (str): [optional]
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.post_config_apache_sling_referrer_filter = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/apps/system/config/org.apache.sling.security.impl.ReferrerFilter',
'operation_id': 'post_config_apache_sling_referrer_filter',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'allow_empty',
'allow_empty_type_hint',
'allow_hosts',
'allow_hosts_type_hint',
'allow_hosts_regexp',
'allow_hosts_regexp_type_hint',
'filter_methods',
'filter_methods_type_hint',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'allow_empty':
(bool,),
'allow_empty_type_hint':
(str,),
'allow_hosts':
(str,),
'allow_hosts_type_hint':
(str,),
'allow_hosts_regexp':
(str,),
'allow_hosts_regexp_type_hint':
(str,),
'filter_methods':
(str,),
'filter_methods_type_hint':
(str,),
},
'attribute_map': {
'allow_empty': 'allow.empty',
'allow_empty_type_hint': 'allow.empty@TypeHint',
'allow_hosts': 'allow.hosts',
'allow_hosts_type_hint': 'allow.hosts@TypeHint',
'allow_hosts_regexp': 'allow.hosts.regexp',
'allow_hosts_regexp_type_hint': 'allow.hosts.regexp@TypeHint',
'filter_methods': 'filter.methods',
'filter_methods_type_hint': 'filter.methods@TypeHint',
},
'location_map': {
'allow_empty': 'query',
'allow_empty_type_hint': 'query',
'allow_hosts': 'query',
'allow_hosts_type_hint': 'query',
'allow_hosts_regexp': 'query',
'allow_hosts_regexp_type_hint': 'query',
'filter_methods': 'query',
'filter_methods_type_hint': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_config_apache_sling_referrer_filter
)
def __post_config_property(
self,
config_node_name,
**kwargs
):
"""post_config_property # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_config_property(config_node_name, async_req=True)
>>> result = thread.get()
Args:
config_node_name (str):
Keyword Args:
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['config_node_name'] = \
config_node_name
return self.call_with_http_info(**kwargs)
self.post_config_property = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/apps/system/config/{configNodeName}',
'operation_id': 'post_config_property',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'config_node_name',
],
'required': [
'config_node_name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'config_node_name':
(str,),
},
'attribute_map': {
'config_node_name': 'configNodeName',
},
'location_map': {
'config_node_name': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_config_property
)
def __post_node(
self,
path,
name,
**kwargs
):
"""post_node # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_node(path, name, async_req=True)
>>> result = thread.get()
Args:
path (str):
name (str):
Keyword Args:
operation (str): [optional]
delete_authorizable (str): [optional]
file (file_type): [optional]
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['path'] = \
path
kwargs['name'] = \
name
return self.call_with_http_info(**kwargs)
self.post_node = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/{path}/{name}',
'operation_id': 'post_node',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'path',
'name',
'operation',
'delete_authorizable',
'file',
],
'required': [
'path',
'name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'path':
(str,),
'name':
(str,),
'operation':
(str,),
'delete_authorizable':
(str,),
'file':
(file_type,),
},
'attribute_map': {
'path': 'path',
'name': 'name',
'operation': ':operation',
'delete_authorizable': 'deleteAuthorizable',
'file': 'file',
},
'location_map': {
'path': 'path',
'name': 'path',
'operation': 'query',
'delete_authorizable': 'query',
'file': 'form',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [
'multipart/form-data'
]
},
api_client=api_client,
callable=__post_node
)
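# Usage sketch (illustrative only): `file` is mapped to multipart/form-data, so
# an open binary file handle can be passed directly; the path, name and file
# name below are placeholders.
#
#   with open('package.zip', 'rb') as fh:
#       api.post_node(path='etc/packages/my_packages',
#                     name='package.zip',
#                     file=fh)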
def __post_node_rw(
self,
path,
name,
**kwargs
):
"""post_node_rw # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_node_rw(path, name, async_req=True)
>>> result = thread.get()
Args:
path (str):
name (str):
Keyword Args:
add_members (str): [optional]
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['path'] = \
path
kwargs['name'] = \
name
return self.call_with_http_info(**kwargs)
self.post_node_rw = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/{path}/{name}.rw.html',
'operation_id': 'post_node_rw',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'path',
'name',
'add_members',
],
'required': [
'path',
'name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'path':
(str,),
'name':
(str,),
'add_members':
(str,),
},
'attribute_map': {
'path': 'path',
'name': 'name',
'add_members': 'addMembers',
},
'location_map': {
'path': 'path',
'name': 'path',
'add_members': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_node_rw
)
def __post_path(
self,
path,
jcrprimary_type,
name,
**kwargs
):
"""post_path # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_path(path, jcrprimary_type, name, async_req=True)
>>> result = thread.get()
Args:
path (str):
jcrprimary_type (str):
name (str):
Keyword Args:
_return_http_data_only (bool): if True, return only the response data,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['path'] = \
path
kwargs['jcrprimary_type'] = \
jcrprimary_type
kwargs['name'] = \
name
return self.call_with_http_info(**kwargs)
self.post_path = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/{path}/',
'operation_id': 'post_path',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'path',
'jcrprimary_type',
'name',
],
'required': [
'path',
'jcrprimary_type',
'name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'path':
(str,),
'jcrprimary_type':
(str,),
'name':
(str,),
},
'attribute_map': {
'path': 'path',
'jcrprimary_type': 'jcr:primaryType',
'name': ':name',
},
'location_map': {
'path': 'path',
'jcrprimary_type': 'query',
'name': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_path
)
def __post_query(
self,
path,
p_limit,
_1_property,
_1_property_value,
**kwargs
):
"""post_query # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_query(path, p_limit, _1_property, _1_property_value, async_req=True)
>>> result = thread.get()
Args:
path (str):
p_limit (float):
_1_property (str):
_1_property_value (str):
Keyword Args:
            _return_http_data_only (bool): response data only, without status
                code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['path'] = \
path
kwargs['p_limit'] = \
p_limit
kwargs['_1_property'] = \
_1_property
kwargs['_1_property_value'] = \
_1_property_value
return self.call_with_http_info(**kwargs)
self.post_query = Endpoint(
settings={
'response_type': (str,),
'auth': [
'aemAuth'
],
'endpoint_path': '/bin/querybuilder.json',
'operation_id': 'post_query',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'path',
'p_limit',
'_1_property',
'_1_property_value',
],
'required': [
'path',
'p_limit',
'_1_property',
'_1_property_value',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'path':
(str,),
'p_limit':
(float,),
'_1_property':
(str,),
'_1_property_value':
(str,),
},
'attribute_map': {
'path': 'path',
'p_limit': 'p.limit',
'_1_property': '1_property',
'_1_property_value': '1_property.value',
},
'location_map': {
'path': 'query',
'p_limit': 'query',
'_1_property': 'query',
'_1_property_value': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__post_query
)
def __post_tree_activation(
self,
ignoredeactivated,
onlymodified,
path,
**kwargs
):
"""post_tree_activation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_tree_activation(ignoredeactivated, onlymodified, path, async_req=True)
>>> result = thread.get()
Args:
ignoredeactivated (bool):
onlymodified (bool):
path (str):
Keyword Args:
            _return_http_data_only (bool): response data only, without status
                code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['ignoredeactivated'] = \
ignoredeactivated
kwargs['onlymodified'] = \
onlymodified
kwargs['path'] = \
path
return self.call_with_http_info(**kwargs)
self.post_tree_activation = Endpoint(
settings={
'response_type': None,
'auth': [
'aemAuth'
],
'endpoint_path': '/etc/replication/treeactivation.html',
'operation_id': 'post_tree_activation',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'ignoredeactivated',
'onlymodified',
'path',
],
'required': [
'ignoredeactivated',
'onlymodified',
'path',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'ignoredeactivated':
(bool,),
'onlymodified':
(bool,),
'path':
(str,),
},
'attribute_map': {
'ignoredeactivated': 'ignoredeactivated',
'onlymodified': 'onlymodified',
'path': 'path',
},
'location_map': {
'ignoredeactivated': 'query',
'onlymodified': 'query',
'path': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__post_tree_activation
)
def __post_truststore(
self,
**kwargs
):
"""post_truststore # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_truststore(async_req=True)
>>> result = thread.get()
Keyword Args:
operation (str): [optional]
new_password (str): [optional]
re_password (str): [optional]
key_store_type (str): [optional]
remove_alias (str): [optional]
certificate (file_type): [optional]
            _return_http_data_only (bool): response data only, without status
                code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.post_truststore = Endpoint(
settings={
'response_type': (str,),
'auth': [
'aemAuth'
],
'endpoint_path': '/libs/granite/security/post/truststore',
'operation_id': 'post_truststore',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'operation',
'new_password',
're_password',
'key_store_type',
'remove_alias',
'certificate',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'operation':
(str,),
'new_password':
(str,),
're_password':
(str,),
'key_store_type':
(str,),
'remove_alias':
(str,),
'certificate':
(file_type,),
},
'attribute_map': {
'operation': ':operation',
                    'new_password': 'newPassword',
                    're_password': 'rePassword',
'key_store_type': 'keyStoreType',
'remove_alias': 'removeAlias',
'certificate': 'certificate',
},
'location_map': {
'operation': 'query',
'new_password': 'query',
're_password': 'query',
'key_store_type': 'query',
'remove_alias': 'query',
'certificate': 'form',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/plain'
],
'content_type': [
'multipart/form-data'
]
},
api_client=api_client,
callable=__post_truststore
)
def __post_truststore_pkcs12(
self,
**kwargs
):
"""post_truststore_pkcs12 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_truststore_pkcs12(async_req=True)
>>> result = thread.get()
Keyword Args:
truststore_p12 (file_type): [optional]
            _return_http_data_only (bool): response data only, without status
                code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.post_truststore_pkcs12 = Endpoint(
settings={
'response_type': (str,),
'auth': [
'aemAuth'
],
'endpoint_path': '/etc/truststore',
'operation_id': 'post_truststore_pkcs12',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'truststore_p12',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'truststore_p12':
(file_type,),
},
'attribute_map': {
'truststore_p12': 'truststore.p12',
},
'location_map': {
'truststore_p12': 'form',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/plain'
],
'content_type': [
'multipart/form-data'
]
},
api_client=api_client,
callable=__post_truststore_pkcs12
)
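# --------------------------------------------------------------------------
# Illustrative usage sketch -- not part of the generated module. It assumes
# the usual openapi-generator Python client layout: a Configuration/ApiClient
# pair plus an API class (called `SlingApi` here) exposing the endpoints
# defined above. The module paths and the API class name are assumptions;
# adjust them to whatever this generator run actually produced.
#
#   configuration = Configuration(host="http://localhost:4502",
#                                 username="admin", password="admin")
#   with ApiClient(configuration) as api_client:
#       api = SlingApi(api_client)
#
#       # Synchronous call, mirroring the post_query docstring above.
#       result = api.post_query(path="/content", p_limit=10.0,
#                               _1_property="sling:resourceType",
#                               _1_property_value="cq:Page")
#
#       # Asynchronous call: pass async_req=True and join the returned thread.
#       thread = api.post_query(path="/content", p_limit=10.0,
#                               _1_property="sling:resourceType",
#                               _1_property_value="cq:Page",
#                               async_req=True)
#       result = thread.get()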
|
package releases
import (
"context"
"io"
"net/http"
"net/http/httptest"
"testing"
"gotest.tools/v3/assert"
"gotest.tools/v3/assert/cmp"
)
func TestReleases_Version(t *testing.T) {
ctx := context.Background()
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/release.txt" {
w.WriteHeader(http.StatusNotFound)
return
}
_, _ = io.WriteString(w, "1.2.3-abc")
}))
rel := New(srv.URL)
ver, err := rel.Version(ctx)
assert.Assert(t, err)
assert.Check(t, cmp.Equal(ver, "1.2.3-abc"))
}
func TestReleases_ResolveURL(t *testing.T) {
ctx := context.Background()
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/1.2.3-abc/checksums.txt" {
t.Log(r.URL.Path)
w.WriteHeader(http.StatusNotFound)
return
}
_, _ = io.WriteString(w, `0e4915e71e0c59ab90e7986eb141a5d69baa098c9df122e05e34b46fd2144e1b *darwin/amd64/agent
6615fd0de8f60b07d6659f5e84dee29986d5a7dfbd4b0169dcd9f0d0cf057fdd *darwin/arm64/agent
24a3df3bc4b67763e465d20118e5856b60a1cb70147195177f03f3e948c0ae86 *linux/amd64/agent
42199f7de7bbac08653c1c6ddb16df1c9838f1e852f4583d5dcf20b478055532 *linux/arm64/agent
51ff01417a07dab940eb69078997ec607c0cde6e317c7ff1cdbe353217e7f04e *linux/arm/agent
2706af5f6e6dd19c9fe38725383abcb83da68bc729632dabca2d2bb190591162 *windows/amd64/agent.exe
51ff01417a07dab940eb69078997ec607c0cde6e317c7ff1cdbe353217e7f04g *./linux/arm1/agent
51ff01417a07dab940eb69078997ec607c0cde6e317c7ff1cdbe353217e7f04h */linux/arm2/agent`)
}))
rel := New(srv.URL)
ver, err := rel.ResolveURL(ctx, Requirements{
Version: "1.2.3-abc",
OS: "linux",
Arch: "amd64",
})
assert.Assert(t, err)
assert.Check(t, cmp.Equal(ver, srv.URL+"/1.2.3-abc/linux/amd64/agent"))
ver, err = rel.ResolveURL(ctx, Requirements{
Version: "1.2.3-abc",
OS: "linux",
Arch: "arm1",
})
assert.Assert(t, err)
assert.Check(t, cmp.Equal(ver, srv.URL+"/1.2.3-abc/linux/arm1/agent"))
ver, err = rel.ResolveURL(ctx, Requirements{
Version: "1.2.3-abc",
OS: "linux",
Arch: "arm2",
})
assert.Assert(t, err)
assert.Check(t, cmp.Equal(ver, srv.URL+"/1.2.3-abc/linux/arm2/agent"))
}
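// The fixtures above encode the manifest layout the resolver is expected to
// parse: "<base URL>/release.txt" holds the version string, and
// "<base URL>/<version>/checksums.txt" lists one artifact per line as
// "<sha256> *<os>/<arch>/agent", optionally prefixed with "./" or "/",
// which ResolveURL is expected to normalise away (the arm1/arm2 cases above
// check exactly that).
//
// A minimal sketch of driving the client outside the tests, assuming only the
// New/Version/ResolveURL API exercised above (the base URL is a placeholder):
//
//	rel := New("https://releases.example.invalid")
//	ver, err := rel.Version(ctx)
//	if err != nil {
//		return err
//	}
//	url, err := rel.ResolveURL(ctx, Requirements{Version: ver, OS: "linux", Arch: "amd64"})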
|
// CreateCutOverReplicationJobRequest creates a request to invoke CutOverReplicationJob API
func CreateCutOverReplicationJobRequest() (request *CutOverReplicationJobRequest) {
request = &CutOverReplicationJobRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("smc", "2019-06-01", "CutOverReplicationJob", "smc", "openAPI")
request.Method = requests.POST
return
} |
def createDialog(self,
message="Default Message",
title="Default Title",
icon="question",
buttons=["Install", "Cancel"],
cancelButton="Cancel"
) -> str:
return cmds.confirmDialog(
title=title,
message=message,
icon=icon,
button=buttons,
cancelButton=cancelButton,
dismissString=cancelButton
) |
/// Returns all relations referenced by this value.
pub fn deps(&self) -> BTreeSet<String> {
match self {
Value::Aggregate(aggregate) => {
let mut deps: BTreeSet<_> = aggregate
.subquery
.iter()
.flat_map(|clause| clause.deps())
.collect();
deps.extend(aggregate.value.deps());
deps
}
_ => BTreeSet::new(),
}
} |
<filename>test/hand/clone.test.ts
import { Hand, StringParser } from '../../src';
const hand = StringParser.parseHand('KJ965.Q7.96.9874');
describe('Testing Hand.clone', () => {
it('Testing a cloned hand is equal to the original', () => {
expect(Hand.clone(hand)).toStrictEqual(hand);
});
it('Testing modifying a cloned hand does not modify the original', () => {
const clonedHand = Hand.clone(hand);
clonedHand.pop();
expect(hand).not.toStrictEqual(clonedHand);
});
});
|
def _build_enums(self):
for dt in self.enumtypes:
entry = self.enumtypes[dt]
print("building enum class for type "+dt)
print("", file=self.prototypes)
print("enum class "+str(dt)+" : "+str(entry["basetype"])+";", file=self.prototypes)
print("", file=self.header_output)
print("////////////////////////////////////////////////////////", file=self.header_output)
print("enum class "+str(dt)+" : "+str(entry["basetype"])+" {", file=self.header_output)
start_of_list=1
for e in entry["enum_entries"]:
if start_of_list ==1:
start_of_list=0
else:
print(",", file=self.header_output)
print(" "+e, file=self.header_output, end='')
print('', file=self.header_output)
print("};", file=self.header_output)
print('', file=self.header_output)
print("std::string to_string("+str(dt)+" v);", file=self.prototypes)
print("std::string to_string("+str(dt)+" v) {", file=self.header_output)
print(" switch(v) {", file=self.header_output)
for e in entry["enum_entries"]:
print(" case "+str(dt)+"::"+e.split("=")[0]+":", file=self.header_output)
print(" return std::string(\""+e.split("=")[0]+"\");", file=self.header_output)
print(" default:", file=self.header_output)
print(" return std::string(\"unknown value: \"+std::to_string(static_cast<"+str(entry["basetype"])+">(v)));", file=self.header_output)
print(" }", file=self.header_output)
print("}", file=self.header_output)
print('', file=self.header_output)
print("std::ostream& operator <<(std::ostream& os, const "+str(dt)+"& v);", file=self.prototypes)
print("std::ostream& operator <<(std::ostream& os, const "+str(dt)+"& v) {", file=self.header_output)
print(" os << to_string(v);", file=self.header_output )
print(" return os;", file=self.header_output)
print("}", file=self.header_output)
print('', file=self.header_output) |
/**
* Selector which determines for which entities walks shall be generated.
*/
public class LightEntitySelector implements EntitySelector {
/**
* The entities for which walks will be generated.
*/
public HashSet<String> entitiesToProcess;
/**
* The file from which the entities will be read.
*/
public File entityFile;
/**
* Constructor
* @param pathToEntityFile The path to the file which contains the entities for which walks shall be generated. The file must be UTF-8
* encoded.
*/
public LightEntitySelector(String pathToEntityFile){
this.entityFile = new File(pathToEntityFile);
}
/**
* Constructor
*
* @param entityFile The file which contains the entities for which walks shall be generated. The file must be UTF-8
* encoded.
*/
public LightEntitySelector(File entityFile) {
this.entityFile = entityFile;
}
/**
* Default logger.
*/
private static final Logger LOGGER = LoggerFactory.getLogger(LightEntitySelector.class);
/**
* Reads the entities in the specified file into a HashSet.
*
* @param pathToEntityFile The file to be read from. The file must be UTF-8 encoded.
* @return A HashSet of entities.
*/
public static HashSet<String> readEntitiesFromFile(String pathToEntityFile) {
return readEntitiesFromFile(new File(pathToEntityFile));
}
/**
* Reads the entities in the specified file into a HashSet.
*
* @param entityFile The file to be read from. The file must be UTF-8 encoded.
* @return A HashSet of entities.
*/
public static HashSet<String> readEntitiesFromFile(File entityFile) {
HashSet<String> result = new HashSet<>();
if(!entityFile.exists()){
LOGGER.error("The specified entity file does not exist: " + entityFile.getName() + "\nProgram will fail.");
}
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(entityFile), StandardCharsets.UTF_8));
String readLine = "";
while((readLine = reader.readLine()) != null){
result.add(readLine);
}
} catch (FileNotFoundException e) {
LOGGER.error("Failed to read file.", e);
} catch (IOException e) {
LOGGER.error("Failed to read file.", e);
}
LOGGER.info("Number of read entities: " + result.size());
return result;
}
/**
* Constructor
*
* @param entitiesToProcess The entities for which walks will be performed.
*/
public LightEntitySelector(HashSet<String> entitiesToProcess) {
this.entitiesToProcess = entitiesToProcess;
}
@Override
public HashSet<String> getEntities() {
if(this.entitiesToProcess != null) return this.entitiesToProcess;
else {
this.entitiesToProcess = readEntitiesFromFile(this.entityFile);
return this.entitiesToProcess;
}
}
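    /*
     * Illustrative usage (not part of the original class): the selector can be
     * seeded either with a ready-made set of entity URIs or with a UTF-8 file
     * containing one entity per line; the file name below is a placeholder.
     *
     *   EntitySelector selector = new LightEntitySelector("./entities.txt");
     *   for (String entity : selector.getEntities()) {
     *       // generate walks for this entity ...
     *   }
     */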
} |
//
// CQResponse.h
// CQRequest
//
// Created by QingGe on 2017/12/7.
//  Copyright © 2017 QingGe. All rights reserved.
//
#import <Foundation/Foundation.h>
@interface CQResponse : NSObject
- (instancetype)initWithURLResponse:(NSURLResponse *)URLResponse
data:(NSData *)data;
@property (nonatomic, strong, readonly) NSURLResponse *URLResponse;
@property (nonatomic, strong, readonly) NSData *data; // raw data returned by the request
@property (nonatomic, strong) NSError *error; // request error
// After cancellation is invoked, error will be set (CQRequestErrorDomain)
@property (nonatomic, copy) void (^cancelBlock)(void);
// Defaults to the raw data; if mapClass and mapper are set in CQRequestContext, this holds the data returned by the mapper delegate
@property (nonatomic, strong) id responseObject;
@property (nonatomic, strong) NSDictionary *extraInfo; // extra info, reserved field
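// Illustrative usage (not part of the original header); obtaining URLResponse
// and data from a session task is assumed to happen elsewhere:
//
//   CQResponse *response = [[CQResponse alloc] initWithURLResponse:URLResponse
//                                                              data:data];
//   if (response.error == nil) {
//       id payload = response.responseObject; // raw data, or the mapped object if a mapper was configured
//   }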
@end
|
/**
* Represents a quarternion.
* <p>
* A quarternion is an extension of complex-numbers from a 2-dimensional (<em>a
* + b<strong>i</strong></em>) to a 4-dimensional (<em>a + b<strong>i</strong> +
* c<strong>j</strong> + d<strong>k</strong></em>) field. They have many
* applications, including (for us) the ability to efficiently represent spatial
* rotations.
* </p>
*
* @author snowjak88
* @see RotationTransform
*/
public class Quarternion {
private final double a, b, c, d;
private double norm = -1d;
public Quarternion() {
this(0d, 0d, 0d, 0d);
}
public Quarternion(double a, double b, double c, double d) {
this.a = a;
this.b = b;
this.c = c;
this.d = d;
}
/**
* Compute the sum of this and another Quarternion.
*
* @param addend
* @return
*/
public Quarternion add(Quarternion addend) {
//@formatter:off
return new Quarternion(
(this.a + addend.a),
(this.b + addend.b),
(this.c + addend.c),
(this.d + addend.d)
);
//@formatter:on
}
/**
* Compute the difference of this and another Quarternion.
*
* @param subtrahend
* @return
*/
public Quarternion subtract(Quarternion subtrahend) {
//@formatter:off
return new Quarternion(
(this.a - subtrahend.a),
(this.b - subtrahend.b),
(this.c - subtrahend.c),
(this.d - subtrahend.d)
);
//@formatter:on
}
/**
* Compute the Hamiltonian product of this and another Quarternion.
*
* @param other
* @return
*/
public Quarternion multiply(Quarternion other) {
//@formatter:off
return new Quarternion(
(this.a * other.a - this.b * other.b - this.c * other.c - this.d * other.d),
(this.a * other.b + this.b * other.a + this.c * other.d - this.d * other.c),
(this.a * other.c - this.b * other.d + this.c * other.a + this.d * other.b),
(this.a * other.d + this.b * other.c - this.c * other.b + this.d * other.a)
);
//@formatter:on
}
/**
* Compute the scalar product of this Quarternion and a scalar value.
*
* @param scalar
* @return
*/
public Quarternion multiply(double scalar) {
//@formatter:off
return new Quarternion(
(this.a * scalar),
(this.b * scalar),
(this.c * scalar),
(this.d * scalar)
);
//@formatter:on
}
/**
* Compute this Quarternion's conjugate.
*
* @return
*/
public Quarternion conjugate() {
//@formatter:off
return new Quarternion(
(this.a),
(-this.b),
(-this.c),
(-this.d)
);
//@formatter:on
}
/**
* Compute this Quarternion's norm.
*
* @return
*/
public double norm() {
if (this.norm < 0d)
this.norm = FastMath
.sqrt(( this.a * this.a ) + ( this.b * this.b ) + ( this.c * this.c ) + ( this.d * this.d ));
return this.norm;
}
/**
* Normalize this Quarternion -- i.e., convert it into a unit quarternion.
*
* @return
*/
public Quarternion normalize() {
return this.multiply(1d / this.norm());
}
/**
* Compute this Quarternion's reciprocal.
*
* @return
*/
public Quarternion reciprocal() {
                // reciprocal = conjugate / ||q||^2
                return this.conjugate().multiply(1d / ( this.norm() * this.norm() ));
}
/**
* Return a List containing all 4 elements of this Quarternion in the order
* <code>{ a, b, c, d }</code>.
*
* @return
*/
public List<Double> toList() {
return Arrays.asList(a, b, c, d);
}
public double getA() {
return a;
}
public double getB() {
return b;
}
public double getC() {
return c;
}
public double getD() {
return d;
}
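        /*
         * Illustrative note (not part of the original class): a spatial rotation is
         * usually applied by conjugation with a unit quaternion q, i.e. the point
         * p = (0, x, y, z) is rotated as p' = q * p * q^-1. A sketch, assuming the
         * rotation quaternion has been built and normalized elsewhere:
         *
         *   Quarternion q = new Quarternion(w, x, y, z).normalize();
         *   Quarternion p = new Quarternion(0d, px, py, pz);
         *   Quarternion rotated = q.multiply(p).multiply(q.reciprocal());
         *   // rotated.getB(), getC(), getD() hold the rotated coordinates
         */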
} |
import { CompanyProfile } from '../data';
const toretskvug: CompanyProfile = {
id: 'toretskvug',
name: '',
shortName: 'Торецьквугілля',
usreou: '33839013',
location: '',
industry: '',
statements: {
'2016': {
assets: {
current: {
quarters: [95814, 123339, false],
year: 140299,
},
fixed: {
quarters: [277135, 273562, false],
year: 265874,
},
totalValue: {
quarters: [496947, 521805, false],
year: 526083,
},
},
equity: {
quarters: [-1609175, -1707808, false],
year: -2032110,
},
producedCost: {
quarters: [186278, 356721, false],
year: 783406,
},
salaryExpenses: {
quarters: [59314, 112152, false],
year: 236418,
},
financials: {
netProfit: {
quarters: [false, false, false],
year: false,
},
netLoss: {
quarters: [109565, 208104, false],
year: 532428,
},
grossProfit: {
quarters: [false, false, false],
year: false,
},
grossLoss: {
quarters: [119332, 236662, false],
year: 546944,
},
netIncome: {
quarters: [66946, 120059, false],
year: 236462,
},
profitGrowth: {
quarters: [false, false, false],
year: false,
},
incomeGrowth: {
quarters: ['135.5%', '65.8%', false],
year: '16.6%',
},
ebitda: {
quarters: [-106698, -202625, false],
year: -513529,
},
ebitdaMargin: {
quarters: ['-159.4%', '-168.8%', false],
year: '-217.2%',
},
},
},
'2017': {
assets: {
current: {
quarters: [117528, 133716, 170835],
year: 170835,
},
fixed: {
quarters: [263064, 259654, 240347],
year: 240347,
},
totalValue: {
quarters: [502111, 514620, 531394],
year: 531394,
},
},
equity: {
quarters: [-2141355, -2202945, -2312163],
year: -2312163,
},
producedCost: {
quarters: [227142, 428790, 792266],
year: 792266,
},
salaryExpenses: {
quarters: [66044, 135289, 268417],
year: 268417,
},
financials: {
netProfit: {
quarters: [false, false, false],
year: false,
},
netLoss: {
quarters: [111016, 177459, 272700],
year: 272700,
},
grossProfit: {
quarters: [false, false, false],
year: false,
},
grossLoss: {
quarters: [141478, 261309, 444660],
year: 444660,
},
netIncome: {
quarters: [85664, 167481, 347606],
year: 347606,
},
profitGrowth: {
quarters: [false, false, false],
year: false,
},
incomeGrowth: {
quarters: ['28.0%', '39.5%', '47.0%'],
year: '47.0%',
},
ebitda: {
quarters: [-108432, -172077, -255404],
year: -255404,
},
ebitdaMargin: {
quarters: ['-126.6%', '-102.7%', '-73.5%'],
year: '-73.5%',
},
},
},
'2018': {
assets: {
current: {
quarters: [183723, 215875, 274988],
year: 274988,
},
fixed: {
quarters: [237893, 239169, 239228],
year: 239228,
},
totalValue: {
quarters: [544119, 578306, 636871],
year: 636871,
},
},
equity: {
quarters: [-2396476, -2463601, -2594364],
year: -2594364,
},
producedCost: {
quarters: [132643, 308215, 671399],
year: 671399,
},
salaryExpenses: {
quarters: [69987, 142705, 296426],
year: 296426,
},
financials: {
netProfit: {
quarters: [false, false, false],
year: false,
},
netLoss: {
quarters: [85883, 144823, 291716],
year: 291716,
},
grossProfit: {
quarters: [false, false, false],
year: false,
},
grossLoss: {
quarters: [84521, 138835, 259456],
year: 259456,
},
netIncome: {
quarters: [48122, 169380, 411943],
year: 411943,
},
profitGrowth: {
quarters: [false, false, false],
year: false,
},
incomeGrowth: {
quarters: ['-43.8%', '1.1%', '18.5%'],
year: '18.5%',
},
ebitda: {
quarters: [-83803, -140687, -274432],
year: -274432,
},
ebitdaMargin: {
quarters: ['-174.1%', '-83.1%', '-66.6%'],
year: '-66.6%',
},
},
},
'2019': {
assets: {
current: {
quarters: [287695, 366272, 326604],
year: 326604,
},
fixed: {
quarters: [314069, 315938, 267782],
year: 267782,
},
totalValue: {
quarters: [601776, 682221, 604678],
year: 604678,
},
},
equity: {
quarters: [-2788486, -2727641, -3050314],
year: -3050314,
},
producedCost: {
quarters: [209250, 354225, 733001],
year: 733001,
},
salaryExpenses: {
quarters: [90280, 184389, 355154],
year: 355154,
},
financials: {
netProfit: {
quarters: [false, false, false],
year: false,
},
netLoss: {
quarters: [109051, 48376, 377245],
year: 377245,
},
grossProfit: {
quarters: [false, false, false],
year: false,
},
grossLoss: {
quarters: [90129, 179825, 422797],
year: 422797,
},
netIncome: {
quarters: [119121, 174400, 310204],
year: 310204,
},
profitGrowth: {
quarters: [false, false, false],
year: false,
},
incomeGrowth: {
quarters: ['147.5%', '3.0%', '-24.7%'],
year: '-24.7%',
},
ebitda: {
quarters: [-106986, -43193, -367738],
year: -367738,
},
ebitdaMargin: {
quarters: ['-89.8%', '-24.8%', '-118.5%'],
year: '-118.5%',
},
},
},
'2020': {
assets: {
current: {
quarters: [282203, 221720],
year: false,
},
fixed: {
quarters: [269813, 280369],
year: false,
},
totalValue: {
quarters: [562306, 512378],
year: false,
},
},
equity: {
quarters: [-3181437, -3331889],
year: false,
},
producedCost: {
quarters: [403379, 739075],
year: false,
},
salaryExpenses: {
quarters: [88794, 178519],
year: false,
},
financials: {
netProfit: {
quarters: [false, false],
year: false,
},
netLoss: {
quarters: [206469, 391855],
year: false,
},
grossProfit: {
quarters: [false, false],
year: false,
},
grossLoss: {
quarters: [264006, 499843],
year: false,
},
netIncome: {
quarters: [139373, 239232],
year: false,
},
profitGrowth: {
quarters: [false, false],
year: false,
},
incomeGrowth: {
quarters: ['17.0%', '37.2%'],
year: false,
},
ebitda: {
quarters: [-202502, -377683],
year: false,
},
ebitdaMargin: {
quarters: ['-145.3%', '-157.9%'],
year: false,
},
},
},
},
};
export default toretskvug;
|
import { constants } from '~common/barrels/constants';
export function getTimezones() {
let timezones: { value: string; label: string }[] = [
{
value: constants.USE_PROJECT_TIMEZONE_VALUE,
label: constants.USE_PROJECT_TIMEZONE_LABEL
},
{
value: 'UTC',
label: 'UTC'
}
];
constants.timezones.forEach(group => {
group.zones.forEach(zone => {
timezones.push({
value: zone.value,
label: `${group.group} - ${zone.name}`
});
});
});
return timezones;
}
|
// SetErrMsgPrinter overrides the default error message printer used to display DDlog error
// messages. An errMsgPrinter set to nil will cause all error messages to be dropped. Concurrent calls
// to the provided errMsgPrinter will be sequential.
func SetErrMsgPrinter(errMsgPrinter ErrMsgPrinter) {
_errMsgMutex.Lock()
defer _errMsgMutex.Unlock()
_errMsgPrinter = errMsgPrinter
} |
M13 bacteriophage DNA inhibits duck hepatitis B virus during acute infection
We investigated effects of various DNAs on duck hepatitis B virus replication in vivo. One‐day‐old ducks were infected intravenously with DHBV. Various DNAs were then injected intravenously, and duck hepatitis B virus levels were followed for up to 20 days after the inoculation. When M13 bacteriophage DNA (M13 DNA), heat‐denatured Escherichia coli DNA or ϕX 174 phage DNA was injected intravenously at a dose of 2.45 mg/kg body wt daily for 10 days, a significant decrease of serum duck hepatitis B virus DNA was detected within 10 days. The efficacy was twice that reported with antisense DNA on a weight basis and far more than that reported on a molar basis. M13 DNA was superior, on the basis of effective dose, to acyclovir as an anti‐duck hepatitis B virus agent. On treatment with M13 DNA, serum 2‐5 A synthetase level was increased five to six times, suggesting that the antiviral effect of M13 DNA is at least partly due to induction of endogenous interferon, which in turn induces 2‐5 A synthetase. No significant inhibitory effect on replication of duck hepatitis B virus was demonstrated by DNAs obtained from herring testes, herring sperm, salmon testes, human placenta or calf thymus. On discontinuation of M13 DNA injection on day 10, duck hepatitis B virus reappeared in the serum at later time points. Digestion of M13 DNA with S1 nuclease resulted in marked reduction of antiviral activity. These results show that M13 DNA, not its digested product, has potent antiviral activity. (HEPATOLOGY 1994;19:1079–1087.) |
Female former boxer helps chase down burglars; homeowner fired gun into ground
An Oak Hill couple has quite a story to tell after chasing down several young people accused of burglarizing their home. On Tuesday afternoon, the couple noticed several people walking on their property at 151 Gary Ave. toward a mother-in-law suite that is connected to the main house. At first the couple thought the group was going to visit their grandson, who lives in the smaller residence. But when the woman noticed her grandson's car wasn't in the driveway, she knew something was up. She saw the group start to burglarize the home and called her husband, who armed himself and fired the gun into the ground, police said. "I knew I couldn't shoot them. Legally you can't shoot them when they're running away. So I pumped one into the ground. Which was good, it alerted all my neighbors," said Ira Roberts. One neighbor, Beverly Rose, encountered one of the suspects taking off and tackled him to the ground. "I did make a comment to him, how he liked being caught and tackled by a 44-year-old woman; he just kept shaking his head," said Rose. Rose said she might be older, but used to be a professional boxer. Justin Goodrich, 23, was held to the ground until deputies arrived. He was taken to the hospital after he started having a seizure, investigators said. Two other suspects, Kayla Selph, 23, of New Smyrna Beach, and Alex Safford, 18, of Oak Hill, took off in a vehicle. They were stopped about four miles from the Gary Avenue address. Deputies conducted a ground search near the scene and found two more suspects running into some nearby woods. A sheriff's office K-9 team caught one of them, 26-year-old New Smyrna Beach resident James Watson, but the final suspect, Joseph Jones, eluded deputies. Watson was taken to the hospital to be treated for dog bite wounds. Jones was arrested by Edgewater police at about 7 a.m. on Wednesday. Authorities said Jones was walking along South Ridgewood Avenue when a concerned citizen called police about a suspicious person. Jones was turned over to the Volusia County Sheriff's Office. All of the defendants are charged with burglary of an occupied dwelling and theft. Watson faces an additional charge of resisting arrest without violence.
Mouse gestures
Double-click event None Dismiss story Save story Open story Open in new tab Pocket Pushbullet Copy to clipboard
Single-click event None Dismiss story Save story Open story Open in new tab Pocket Pushbullet Copy to clipboard
Click-and-hold event None Dismiss story Save story Open story Open in new tab Pocket Pushbullet Copy to clipboard
Useful links
Help section Open help
Logs section Open logs
Share your feeds Copy link to clipboard
Share your account (will share email and password!) Copy link to clipboard
Share your account (same as above but will unlink it afterwards) Copy link to clipboard
Contributions (BTC, LTC, ETH)
Integration
Pushbullet API key ( get it )
Synchronization settings
Email
Password (don't re-use passwords from other services)
Synchronize dismissed stories
Save configuration Save to server
Update configuration Update on server
Load configuration Load from server
Delete configuration Delete from server
Data management
Export data Export
Import data Import
Export CSV Export CSV
Import CSV Import CSV
Clear all feeds Clear feeds
Clear all logs Clear logs
Reset initialized flag Reset flag
Mobile friendly settings
Hide description*
Hide images*
Disable animations**
Max width for phones
Max width for tablets
Behavior
Enable notifications Request permissions
Snooze notifications Select 1 hour 2 hours 4 hours 8 hours 12 hours 24 hours Cancel snooze
Use dark theme**
Log activities**
Disable paging**
Fetch new stories every (minutes)**
Debug settings
Clear storage** Clear
Generate test data** |
Epidermal effects of tretinoin and isotretinoin: influence of isomerism.
The efficacy of tretinoin is well established in the treatment of acne and photoaged skin, however as a typical side effect of tretinoin treatment most patients develop a low-grade irritant dermatitis. Since isotretinoin topical treatment usually shows much lower incidence and intensity of adverse effects than tretinoin topical treatment, histological studies are needed to scientifically evaluate the effects of isotretinoin application on epidermis and also to assess if it can be used in anti-aging products as an alternative to tretinoin. Thus, the aim of this study was to compare the effects of topical use of tretinoin or isotretinoin on hairless mice epidermis, using appropriate histopathological and histometric techniques, in order to evaluate the influence of isomerism on skin effects. For this, gel cream formulations containing or not 0.05% tretinoin or 0.05% isotretinoin were applied in the dorsum of hairless mice, once a day for seven days. Histopathological evaluation, viable epidermal and horny layer thicknesses as well as the number of epidermal cell layers were determined. Our results showed that tretinoin and isotretinoin were effective in the enhancement of viable epidermis thickness and number of epidermal cell layers, suggesting that they could be used for stimulation of cellular renewal. However isomerism influenced skin effects since isotretinoin had more pronounced effects than tretinoin in viable epidermis. In addition only isotretinoin treatment enhanced horny layer thickness when compared to the gel cream treatment. |
// test1.cpp - Test Inline Operators
#pragma warning (disable:4101)
// disable warning about unreferenced local variables
#include <iostream>
int main()
{
std::cout << "(this program generates no output)\n\n";
struct Package {
long originZip; // 4
long destinationZip; // 4
float shippingPrice; // 4
};
char myChar;
bool myBool;
short myShort;
int myInt;
long myLong;
float myFloat;
double myDouble;
Package myPackage;
long double myLongDouble;
long myLongArray[10];
__asm {
mov eax,myPackage.destinationZip;
mov eax,LENGTH myInt; // 1
mov eax,LENGTH myLongArray; // 10
mov eax,TYPE myChar; // 1
mov eax,TYPE myBool; // 1
mov eax,TYPE myShort; // 2
mov eax,TYPE myInt; // 4
mov eax,TYPE myLong; // 4
mov eax,TYPE myFloat; // 4
mov eax,TYPE myDouble; // 8
mov eax,TYPE myPackage; // 12
mov eax,TYPE myLongDouble; // 8
mov eax,TYPE myLongArray; // 4
mov eax,SIZE myLong; // 4
mov eax,SIZE myPackage; // 12
mov eax,SIZE myLongArray; // 40
}
return 0;
} |
#include "Scanner.hpp"
#include <fstream>
#include <cwctype>
#include "../Utils/Utils.hpp"
#if defined( _WIN32 )
#include <Windows.h>
#else
#include <cstdlib>
#include <clocale>
#endif
#define isHexNumber(c) ( ( c >= L'A' && c <= L'F' ) || ( c >= L'a' && c <= L'f' ) || ( c >= L'0' && c <= L'9' ))
#define isOctalNumber(c) ( c >= L'0' && c <= L'7' )
namespace MaryLang
{
namespace Lexer
{
Scanner::Scanner( char const * filename )
:diag( true ), pos( 0, 0 ),
buffer( nullptr ), current_token( '\n' ),
marker_position( 0 ), begin_mark( 0 ),
buffer_size( 0 )
{
if( SetNewFileName( filename ) ){
Token::InitLookupTable();
} else {
exit( -1 );
}
}
Scanner::~Scanner()
{
delete []buffer;
}
void Scanner::NextChar()
{
if( marker_position == buffer_size ) {
current_token = L'\0';
return;
}
if( current_token == L'\n' ){
++pos._line_number;
pos._column_number = 1;
} else {
++pos._column_number;
}
current_token = buffer[marker_position];
++marker_position;
}
bool Scanner::SetNewFileName( char const * filename )
{
char* buffer;
std::ifstream in( filename, std::ios_base::in | std::ios_base::binary
| std::ios_base::ate );
if( in ) {
// get file size
in.seekg(0, std::ios::end);
buffer_size = (size_t)in.tellg();
in.seekg(0, std::ios::beg);
buffer = (char*)calloc(buffer_size + 1, sizeof(char));
in.read(buffer, buffer_size);
// close file
in.close();
}
else {
std::wcerr << L"Unable to open source file: " << filename << std::endl;
return false;
}
// convert unicode
#if defined( _WIN32 ) && defined( _MSC_VER )
int wsize = MultiByteToWideChar( CP_UTF8, 0, buffer, -1, nullptr, 0);
if( !wsize ) {
std::wcerr << L"Unable to open source file: " << filename << std::endl;
return false;
}
wchar_t* wbuffer = new wchar_t[wsize];
int check = MultiByteToWideChar( CP_UTF8, 0, buffer, -1, wbuffer, wsize);
if(!check) {
std::wcerr << L"Unable to open source file: " << filename << std::endl;
return false;
}
#else
size_t wsize = std::mbstowcs( nullptr, buffer, buffer_size);
if( wsize == (size_t)-1 ) {
free( buffer );
std::wcerr << L"Unable to open source file: " << filename << std::endl;
return false;
}
wchar_t* wbuffer = new wchar_t[wsize + 1];
size_t check = std::mbstowcs( wbuffer, buffer, buffer_size );
if( check == (size_t)-1 ) {
free( buffer );
delete[] wbuffer;
std::wcerr << L"Unable to open source file: " << filename << std::endl;
return false;
}
wbuffer[wsize] = L'\0';
#endif
free(buffer);
this->buffer = wbuffer;
return true;
}
Token Scanner::GetNextToken()
{
for( ; ; )
{
if( EOF != current_token
&& ( std::iswalpha( current_token ) || current_token == L'_' ) )
{
return IdentifierOrKeywordToken();
} else {
switch ( current_token )
{
case L'\0':
marker_position = buffer_size;
return Token( pos, TokenType::TK_EOF );
case L' ':
case L'\r':
case L'\t':
case L'\n':
case L'\v':
case L'\f':
NextChar();
continue;
case L'0':
case L'1':
case L'2':
case L'3':
case L'4':
case L'5':
case L'6':
case L'7':
case L'8':
case L'9':
return GetNumberToken();
case L'"':
case L'\'':
return GetStringLiteralToken();
case L'.':
NextChar();
return Token( pos, TokenType::TK_DOT );
case L'+':
{
Support::Position newPos( pos );
NextChar();
switch( current_token )
{
case L'+': NextChar(); return Token( newPos, TokenType::TK_INCREMENT );
case L'=': NextChar(); return Token( newPos, TokenType::TK_ADDEQL );
default: return Token( newPos, TokenType::TK_ADD );
}
}
case L'-':
{
Support::Position newPos( pos );
NextChar();
switch( current_token )
{
case L'-': NextChar(); return Token( newPos, TokenType::TK_DECREMENT );
case L'>': NextChar(); return Token( newPos, TokenType::TK_ARROW );
case L'=': NextChar(); return Token( newPos, TokenType::TK_SUBEQL );
default: return Token( newPos, TokenType::TK_SUB );
}
}
case L'*':
{
Support::Position newPos( pos );
NextChar();
switch( current_token )
{
case L'=': NextChar(); return Token( newPos, TokenType::TK_MULEQL );
case L'*': NextChar(); return Token( newPos, TokenType::TK_EXP );
default: return Token( newPos, TokenType::TK_MUL );
}
}
case L'/':
{
Support::Position newPos( pos );
NextChar();
switch( current_token )
{
case L'=':
NextChar();
return Token( newPos, TokenType::TK_DIVEQL );
case L'*':
{
NextChar();
while( current_token != EOF &&
!( current_token == L'*' && buffer[ marker_position ] == L'/' )){
NextChar();
}
if( current_token == EOF ) {
throw std::runtime_error( "Unterminated comment" );
}
NextChar();
NextChar();
continue;
}
case L'/':
while( current_token != L'\n' ) NextChar();
continue;
default:
return Token( newPos, TokenType::TK_DIV );
}
}
case L'&':
NextChar();
switch( current_token ){
case L'&': NextChar(); return Token( pos, TokenType::TK_LAND );
case L'=': NextChar(); return Token( pos, TokenType::TK_ANDEQL );
default: return Token( pos, TokenType::TK_AND );
}
case L'|':
NextChar();
switch( current_token ){
case L'|': NextChar(); return Token( pos, TokenType::TK_LOR );
case L'=': NextChar(); return Token( pos, TokenType::TK_OREQL );
default: return Token( pos, TokenType::TK_OR );
}
case L'^':
NextChar();
switch( current_token )
{
case L'=': NextChar(); return Token( pos, TokenType::TK_XORASSIGN );
default: return Token( pos, TokenType::TK_XOR );
}
case L'~': NextChar(); return Token( pos, TokenType::TK_NEG );
case L'!':
NextChar();
switch( current_token ){
case L'=': NextChar(); return Token( pos, TokenType::TK_NOTEQL );
default: return Token( pos, TokenType::TK_NOT );
}
case L'<':
NextChar();
switch ( current_token )
{
case L'<':
NextChar();
switch( current_token ){
case L'=': NextChar(); return Token( pos, TokenType::TK_LSASSIGN );
default: return Token( pos, TokenType::TK_LSHIFT );
}
case L'=': NextChar(); return Token( pos, TokenType::TK_LEQL );
default: return Token( pos, TokenType::TK_LESS );
}
case L'>':
NextChar();
switch ( current_token )
{
case L'>':
NextChar();
switch( current_token )
{
case L'=': NextChar(); return Token( pos, TokenType::TK_RSASSIGN );
default: return Token( pos, TokenType::TK_RSHIFT );
}
case L'=': NextChar(); return Token( pos, TokenType::TK_GEQL );
default: return Token( pos, TokenType::TK_GREATER );
}
case L'%':
NextChar();
switch ( current_token )
{
case L'=': NextChar(); return Token( pos, TokenType::TK_MODASSIGN );
default: return Token( pos, TokenType::TK_MODULO );
}
case L'(':
NextChar(); return Token( pos, TokenType::TK_LPAREN );
case L')':
NextChar(); return Token( pos, TokenType::TK_RPAREN );
case L'[':
NextChar(); return Token( pos, TokenType::TK_LBRACKET );
case L']':
NextChar(); return Token( pos, TokenType::TK_RBRACKET );
case L'{':
NextChar(); return Token( pos, TokenType::TK_LBRACE );
case L'}':
NextChar(); return Token( pos, TokenType::TK_RBRACE );
case L'=':
NextChar();
switch ( current_token )
{
case L'=':
NextChar();
return Token( pos, TokenType::TK_EQL );
default: return Token( pos, TokenType::TK_ASSIGN );
}
case L'@':
NextChar();
return Token( pos, TokenType::TK_AT );
case L':':
NextChar();
return Token( pos, TokenType::TK_COLON );
case L';':
NextChar();
return Token( pos, TokenType::TK_SEMICOLON );
case L',':
NextChar();
return Token( pos, TokenType::TK_COMMA );
default:
NextChar();
diag.Warning( pos, L"Invalid character" );
return Token( pos, TokenType::TK_INVALID );
}
}
}
} // Scanner::getNextToken()
Token Scanner::IdentifierOrKeywordToken()
{
begin_mark = marker_position - 1;
Support::Position newPos = pos;
NextChar();
while( ( std::iswalnum( current_token ) || current_token == L'_' )
&& !( current_token == EOF ) )
{
NextChar();
}
if( current_token == EOF ){
return Token( newPos, TokenType::TK_EOF );
}
return IdentifierOrKeywordToken( newPos );
} // Scanner::identifierOrKeyword
Token Scanner::IdentifierOrKeywordToken( Support::Position pos )
{
auto wsize = marker_position - begin_mark;
wchar_t *tk = Support::Mystrndup( &buffer[begin_mark], wsize - 1 );
if( tk == nullptr ){
std::wcerr << "Allocation/Copy failed" << std::endl;
exit( 1 );
}
auto f = Token::lookup_table.find( tk );
TokenType type = ( f == Token::lookup_table.end() ) ? TokenType::TK_IDENTIFIER : f->second;
return Token( tk, pos, type );
}
Token Scanner::GetIntegerToken()
{
// gather as many digits as we can get. Are we done?
bool isDecimal = false;
do {
NextChar();
} while( std::iswdigit( current_token ) );
// OK! Did we hit a decimal point?
if( current_token == L'.' ){
isDecimal = true;
NextChar();
} else if( current_token == L'E' || current_token == L'e' ) {
// Or perhaps an exponential sign?
NextChar();
// with a positiive or negative sign?
if( ( current_token == L'+' || current_token == L'-' )
&& std::iswdigit( buffer[marker_position] ) )
{
NextChar();
}
}
//lets gather the rest of the circle member and return home.
while( std::iswdigit( current_token )){
NextChar();
}
return Token( Support::Mystrndup(
&buffer[begin_mark], ( marker_position - begin_mark ) - 1 ),
pos, ( isDecimal ? TokenType::TK_DOUBLE : TokenType::TK_INT ) );
}
Token Scanner::GetNumberToken()
{
Support::Position newPos = pos;
begin_mark = marker_position - 1;
wchar_t next_char_lookahead = buffer[marker_position];
if( current_token == L'0' &&
( next_char_lookahead == L'x' || next_char_lookahead == L'X' ) )
{
NextChar();
do {
NextChar();
} while( current_token != EOF && isHexNumber( current_token ));
                    if( marker_position == buffer_size ){
diag.Warning( newPos, L"End of file encountered" );
}
return Token( Support::Mystrndup(
&buffer[begin_mark], ( marker_position - begin_mark ) - 1 ),
newPos, TokenType::TK_INT );
} else if( current_token == L'0' && isOctalNumber( next_char_lookahead ) ) {
do {
NextChar();
} while( current_token != EOF && isOctalNumber( current_token ) );
return Token( Support::Mystrndup(
&buffer[begin_mark], ( marker_position - begin_mark ) - 1 ),
newPos, TokenType::TK_INT );
} else if( current_token == L'0' &&
( next_char_lookahead == L'b' || next_char_lookahead == L'B' ))
{
NextChar();
do {
NextChar();
} while( current_token == L'0' || current_token == L'1' );
if( std::iswdigit( current_token ) ){
diag.Warning( pos, L"Invalid binary digit" );
while( std::iswdigit( current_token ) ) NextChar();
}
return Token( Support::Mystrndup(
&buffer[begin_mark], ( marker_position - begin_mark ) - 1 ),
newPos, TokenType::TK_INT );
}
return GetIntegerToken();
} // Scanner::getNumber()
            // TO-DO: Recognize string interpolation.
Token Scanner::GetStringLiteralToken()
{
Support::Position newPos ( pos );
std::wstring buf;
int const delimeter = current_token;
NextChar();
bool hasError = false, strInterpolOpen = false, strInterpolAvailable = false;
for( ; ; ){
if( current_token == EOF || current_token == L'\n' ){
                    diag.Error( pos, L"Expected a delimiter in string." );
                    marker_position = buffer_size;
                    return Token( pos, TokenType::TK_INVALID );
                }
if( current_token == delimeter ){
NextChar();
break;
} else if( current_token == L'\\' ) {
wchar_t peek = buffer[marker_position];
if( peek == delimeter ){
NextChar();
}
} else if( current_token == L'#' ){
buf.push_back( current_token );
NextChar();
if( current_token == L'{' ){
buf.push_back( current_token );
strInterpolOpen = true;
NextChar();
}
continue;
} else if( current_token == L'}' && strInterpolOpen ){
strInterpolOpen = false;
strInterpolAvailable = true;
}
buf.push_back( current_token );
NextChar();
}
return Token( Support::Mystrndup( buf.c_str(), buf.size() ),
newPos,
( strInterpolAvailable ? TokenType::TK_STRLITINTERPOL : TokenType::TK_STRLITERAL ) );
}
} // namespace Lexer
} // namespace MaryLang
|
/*
* Copyright 2012-2023 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.autoconfigure.graphql.servlet;
import java.util.Collections;
import java.util.Map;
import graphql.GraphQL;
import jakarta.websocket.server.ServerContainer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.aot.hint.RuntimeHints;
import org.springframework.aot.hint.RuntimeHintsRegistrar;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.condition.ConditionalOnWebApplication;
import org.springframework.boot.autoconfigure.graphql.GraphQlAutoConfiguration;
import org.springframework.boot.autoconfigure.graphql.GraphQlCorsProperties;
import org.springframework.boot.autoconfigure.graphql.GraphQlProperties;
import org.springframework.boot.autoconfigure.http.HttpMessageConverters;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.ImportRuntimeHints;
import org.springframework.core.annotation.Order;
import org.springframework.core.log.LogMessage;
import org.springframework.graphql.ExecutionGraphQlService;
import org.springframework.graphql.execution.GraphQlSource;
import org.springframework.graphql.server.WebGraphQlHandler;
import org.springframework.graphql.server.WebGraphQlInterceptor;
import org.springframework.graphql.server.webmvc.GraphQlHttpHandler;
import org.springframework.graphql.server.webmvc.GraphQlWebSocketHandler;
import org.springframework.graphql.server.webmvc.GraphiQlHandler;
import org.springframework.graphql.server.webmvc.SchemaHandler;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.converter.GenericHttpMessageConverter;
import org.springframework.http.converter.HttpMessageConverter;
import org.springframework.web.cors.CorsConfiguration;
import org.springframework.web.servlet.HandlerMapping;
import org.springframework.web.servlet.config.annotation.CorsRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
import org.springframework.web.servlet.function.RequestPredicates;
import org.springframework.web.servlet.function.RouterFunction;
import org.springframework.web.servlet.function.RouterFunctions;
import org.springframework.web.servlet.function.ServerRequest;
import org.springframework.web.servlet.function.ServerResponse;
import org.springframework.web.socket.WebSocketHandler;
import org.springframework.web.socket.server.support.DefaultHandshakeHandler;
import org.springframework.web.socket.server.support.WebSocketHandlerMapping;
/**
* {@link EnableAutoConfiguration Auto-configuration} for enabling Spring GraphQL over
* Spring MVC.
*
* @author Brian Clozel
* @since 2.7.0
*/
@AutoConfiguration(after = GraphQlAutoConfiguration.class)
@ConditionalOnWebApplication(type = ConditionalOnWebApplication.Type.SERVLET)
@ConditionalOnClass({ GraphQL.class, GraphQlHttpHandler.class })
@ConditionalOnBean(ExecutionGraphQlService.class)
@EnableConfigurationProperties(GraphQlCorsProperties.class)
@ImportRuntimeHints(GraphQlWebMvcAutoConfiguration.GraphiQlResourceHints.class)
public class GraphQlWebMvcAutoConfiguration {
private static final Log logger = LogFactory.getLog(GraphQlWebMvcAutoConfiguration.class);
@SuppressWarnings("removal")
private static final MediaType[] SUPPORTED_MEDIA_TYPES = new MediaType[] { MediaType.APPLICATION_GRAPHQL_RESPONSE,
MediaType.APPLICATION_JSON, MediaType.APPLICATION_GRAPHQL };
@Bean
@ConditionalOnMissingBean
public GraphQlHttpHandler graphQlHttpHandler(WebGraphQlHandler webGraphQlHandler) {
return new GraphQlHttpHandler(webGraphQlHandler);
}
@Bean
@ConditionalOnMissingBean
public WebGraphQlHandler webGraphQlHandler(ExecutionGraphQlService service,
ObjectProvider<WebGraphQlInterceptor> interceptors) {
return WebGraphQlHandler.builder(service).interceptors(interceptors.orderedStream().toList()).build();
}
@Bean
@Order(0)
public RouterFunction<ServerResponse> graphQlRouterFunction(GraphQlHttpHandler httpHandler,
GraphQlSource graphQlSource, GraphQlProperties properties) {
String path = properties.getPath();
logger.info(LogMessage.format("GraphQL endpoint HTTP POST %s", path));
RouterFunctions.Builder builder = RouterFunctions.route();
builder = builder.GET(path, this::onlyAllowPost);
builder = builder.POST(path, RequestPredicates.contentType(MediaType.APPLICATION_JSON)
.and(RequestPredicates.accept(SUPPORTED_MEDIA_TYPES)), httpHandler::handleRequest);
if (properties.getGraphiql().isEnabled()) {
GraphiQlHandler graphiQLHandler = new GraphiQlHandler(path, properties.getWebsocket().getPath());
builder = builder.GET(properties.getGraphiql().getPath(), graphiQLHandler::handleRequest);
}
if (properties.getSchema().getPrinter().isEnabled()) {
SchemaHandler schemaHandler = new SchemaHandler(graphQlSource);
builder = builder.GET(path + "/schema", schemaHandler::handleRequest);
}
return builder.build();
}
private ServerResponse onlyAllowPost(ServerRequest request) {
return ServerResponse.status(HttpStatus.METHOD_NOT_ALLOWED).headers(this::onlyAllowPost).build();
}
private void onlyAllowPost(HttpHeaders headers) {
headers.setAllow(Collections.singleton(HttpMethod.POST));
}
@Configuration(proxyBeanMethods = false)
public static class GraphQlEndpointCorsConfiguration implements WebMvcConfigurer {
final GraphQlProperties graphQlProperties;
final GraphQlCorsProperties corsProperties;
public GraphQlEndpointCorsConfiguration(GraphQlProperties graphQlProps, GraphQlCorsProperties corsProps) {
this.graphQlProperties = graphQlProps;
this.corsProperties = corsProps;
}
@Override
public void addCorsMappings(CorsRegistry registry) {
CorsConfiguration configuration = this.corsProperties.toCorsConfiguration();
if (configuration != null) {
registry.addMapping(this.graphQlProperties.getPath()).combine(configuration);
}
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnClass({ ServerContainer.class, WebSocketHandler.class })
@ConditionalOnProperty(prefix = "spring.graphql.websocket", name = "path")
public static class WebSocketConfiguration {
@Bean
@ConditionalOnMissingBean
public GraphQlWebSocketHandler graphQlWebSocketHandler(WebGraphQlHandler webGraphQlHandler,
GraphQlProperties properties, HttpMessageConverters converters) {
return new GraphQlWebSocketHandler(webGraphQlHandler, getJsonConverter(converters),
properties.getWebsocket().getConnectionInitTimeout());
}
private GenericHttpMessageConverter<Object> getJsonConverter(HttpMessageConverters converters) {
return converters.getConverters()
.stream()
.filter(this::canReadJsonMap)
.findFirst()
.map(this::asGenericHttpMessageConverter)
.orElseThrow(() -> new IllegalStateException("No JSON converter"));
}
private boolean canReadJsonMap(HttpMessageConverter<?> candidate) {
return candidate.canRead(Map.class, MediaType.APPLICATION_JSON);
}
@SuppressWarnings("unchecked")
private GenericHttpMessageConverter<Object> asGenericHttpMessageConverter(HttpMessageConverter<?> converter) {
return (GenericHttpMessageConverter<Object>) converter;
}
@Bean
public HandlerMapping graphQlWebSocketMapping(GraphQlWebSocketHandler handler, GraphQlProperties properties) {
String path = properties.getWebsocket().getPath();
logger.info(LogMessage.format("GraphQL endpoint WebSocket %s", path));
WebSocketHandlerMapping mapping = new WebSocketHandlerMapping();
mapping.setWebSocketUpgradeMatch(true);
mapping.setUrlMap(Collections.singletonMap(path,
handler.initWebSocketHttpRequestHandler(new DefaultHandshakeHandler())));
mapping.setOrder(2); // Ahead of HTTP endpoint ("routerFunctionMapping" bean)
return mapping;
}
}
static class GraphiQlResourceHints implements RuntimeHintsRegistrar {
@Override
public void registerHints(RuntimeHints hints, ClassLoader classLoader) {
hints.resources().registerPattern("graphiql/index.html");
}
}
}
|
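A hedged configuration sketch for the settings the auto-configuration above reads (the spring.graphql.* keys are assumed to be the standard Spring Boot bindings behind GraphQlProperties and GraphQlCorsProperties; the paths and origin are example values only):

# application.properties (illustrative values)
spring.graphql.path=/graphql
spring.graphql.graphiql.enabled=true
spring.graphql.graphiql.path=/graphiql
spring.graphql.websocket.path=/graphql-ws
spring.graphql.schema.printer.enabled=true
spring.graphql.cors.allowed-origins=https://example.org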
//------------------------------------------------------------------------
// Compiler::optUnmarkCSE
//
// Arguments:
// tree - A sub tree that originally was part of a CSE use
// that we are currently in the process of removing.
//
// Return Value:
// Returns true if we can safely remove the 'tree' node.
// Returns false if the node is a CSE def that the caller
// needs to extract and preserve.
//
// Notes:
// If 'tree' is a CSE use then we perform an unmark CSE operation
// so that the CSE used counts and weight are updated properly.
// The only caller for this method is optUnmarkCSEs which is a
// tree walker visitor function. When we return false this method
// returns WALK_SKIP_SUBTREES so that we don't visit the remaining
// nodes of the CSE def.
//
bool Compiler::optUnmarkCSE(GenTree* tree)
{
if (!IS_CSE_INDEX(tree->gtCSEnum))
{
return true;
}
    // make sure it's been initialized
noway_assert(optCSEweight <= BB_MAX_WEIGHT);
    // Is this a CSE use?
if (IS_CSE_USE(tree->gtCSEnum))
{
unsigned CSEnum = GET_CSE_INDEX(tree->gtCSEnum);
CSEdsc* desc = optCSEfindDsc(CSEnum);
#ifdef DEBUG
if (verbose)
{
printf("Unmark CSE use #%02d at ", CSEnum);
printTreeID(tree);
printf(": %3d -> %3d\n", desc->csdUseCount, desc->csdUseCount - 1);
}
#endif // DEBUG
        // Perform an unmark CSE operation
        // 1. Reduce the nested CSE's 'use' count
noway_assert(desc->csdUseCount > 0);
if (desc->csdUseCount > 0)
{
desc->csdUseCount -= 1;
if (desc->csdUseWtCnt < optCSEweight)
{
desc->csdUseWtCnt = 0;
}
else
{
desc->csdUseWtCnt -= optCSEweight;
}
}
        // 2. Unmark the CSE information in the node
tree->gtCSEnum = NO_CSE;
return true;
}
else
{
        // It is not safe to remove this node, so we will return false
        // and the caller must add this node to the side effect list
return false;
}
} |
/**
 * A composite unary predicate yielding true when every predicate matches (no
 * further predicate is evaluated beyond the first one returning false).
*
* @param <E> the element Type
* @author rferranti
*/
public class AllMatchingPredicate<E> implements Predicate<E> {
private final Iterable<Predicate<E>> predicates;
public AllMatchingPredicate(Iterable<Predicate<E>> predicates) {
dbc.precondition(predicates != null, "cannot evaluate and(...) of a null iterable of predicates");
this.predicates = predicates;
}
@Override
public boolean test(E element) {
for (Predicate<E> predicate : predicates) {
if (!predicate.test(element)) {
return false;
}
}
return true;
}
} |
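A minimal usage sketch for the AllMatchingPredicate class above (assuming the Predicate it implements is java.util.function.Predicate; the demo class and the two sample predicates are illustrative, not part of the original library):

import java.util.List;
import java.util.function.Predicate;

class AllMatchingPredicateDemo {
    public static void main(String[] args) {
        // Both predicates must pass for the composite to yield true.
        Predicate<String> notEmpty = s -> !s.isEmpty();
        Predicate<String> shortish = s -> s.length() < 10;
        AllMatchingPredicate<String> all =
                new AllMatchingPredicate<>(List.of(notEmpty, shortish));
        System.out.println(all.test("abc")); // true: every predicate matches
        System.out.println(all.test(""));    // false: evaluation stops at the first failing predicate
    }
}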
/**
*
* @return "Select Astrobee" followed by names of bees (no smartdock)
*/
protected String[] makeListOfAgentNames() {
final List<Agent> agents = ActiveAgentSet.asList();
final List<String> agentStrings = new ArrayList<String>();
for(final Agent a : agents){
if(!a.equals(Agent.SmartDock)){
agentStrings.add(a.name());
}
}
return agentStrings.toArray(new String[agentStrings.size()]);
} |
/**
* The helper class generates several random numbers
 * and puts the results into a file. The file name comes as the first
 * command line argument.
*/
public static class RandomRunner {
private static final int COUNT = 10;
public static void main(String[] args) {
StringBuilder sb = new StringBuilder();
Random rng = Utils.getRandomInstance();
for (int i = 0; i < COUNT; i++) {
sb.append(rng.nextLong()).append(' ');
}
try (PrintWriter pw = new PrintWriter(new FileWriter(args[0]))) {
pw.write(sb.toString());
} catch (IOException ioe) {
throw new Error("TESTBUG: Problem during IO operation with file: " + args[0], ioe);
}
}
} |
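# Constructive solution: read N and x; unless x is 1 or 2N-1 the code places x
# at the centre of a length-(2N-1) sequence with hand-picked neighbouring
# values, marks those values as used, fills every remaining slot with the
# leftover numbers, and prints the result 1-indexed.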
N,x=map(int,input().split())
if x==1 or x==2*N-1:
print("No")
exit()
print("Yes")
if N==2:
for i in range(1,4):
print(i)
exit()
x-=1
A=[-1 for i in range(2*N-1)]
B=[0 for i in range(2*N-1)]
if x==1:
A[N-2]=x+1
A[N-1]=x
A[N]=x-1
A[N+1]=x+2
for i in range(4):
B[x-1+i]=1
else:
A[N-2]=x-1
A[N-1]=x
A[N]=x+1
A[N+1]=x-2
for i in range(4):
B[x-2+i]=1
C=[]
for i in range(2*N-1):
if B[i]==0:
C.append(i)
for i in range(2*N-1):
if A[i]!=-1:
continue
A[i]=C[-1]
C.pop()
for i in A:
print(i+1) |
// projects/RTNetwork/Headers/TwitterAccount.h
#pragma once
#include "Texture.h"
#include "Timer.h"
class RTNetwork;
class TwitterAccount
{
protected:
friend class RTNetwork;
class ThumbnailStruct
{
public:
SP<Texture> mTexture = nullptr;
std::string mURL = "";
};
class UserStruct
{
public:
usString mName = std::string("");
unsigned int mFollowersCount = 0;
unsigned int mFollowingCount = 0;
unsigned int mStatuses_count = 0;
std::string UTCTime = "";
ThumbnailStruct mThumb;
};
struct tweet
{
u64 mID;
u32 mRTCount;
u32 mQuoteCount;
std::vector<std::pair<u64,u32>> mReferences;
std::string mRTedUser;
std::vector<std::string> mURLs;
};
UserStruct mUserStruct;
std::vector<tweet> mTweetList;
u64 mID;
std::string mNextTweetsToken="-1";
std::vector<u64> mTweetToRequestList;
std::set<std::string> mUserNameRequestList;
std::set<std::string> mYoutubeVideoList;
std::map<u64, u32> mCountHasRT; // users retweeted by this account
std::map<u64, u32> mCountWasRT; // users who retweeted a tweet from this account
u32 mHasRetweetCount=0;
u32 mWasRetweetCount=0;
u32 mNotRetweetCount=0;
u32 mOwnRetweetCount = 0;
u32 mAllTweetCount = 0;
u32 mConnectionCount =0; // interaction count
float mSourceCoef=0.0f;
// build network and sort it by more interaction to less interaction
// u64 = ID
// u32 = hasRT
// u32 = wasRT
std::vector < std::pair<u64, std::pair<u32,u32>> > mSortedNetwork;
CoreItemSP loadTweetsFile();
void saveTweetsFile(CoreItemSP toSave);
CoreItemSP loadTweetUserFile(u64 twtid);
void saveTweetUserFile(u64 twtid, const std::string& username);
CoreItemSP loadRetweeters(u64 twtid);
void saveRetweeters(u64 twtid, CoreItemSP tosave);
static CoreItemSP loadUserID(const std::string& uname);
static void saveUserID(const std::string& uname, u64 id);
CoreItemSP loadURL(const std::string& shortURL);
void saveURL(const std::string& shortURL, const std::string& fullURL);
CoreItemSP loadYoutubeFile(const std::string& videoID);
void saveYoutubeFile(const std::string& videoID, const std::string& channelID);
void updateTweetList(CoreItemSP currentTwt);
RTNetwork* mSettings = nullptr;
void buildNetworkList();
int mDepth = -1;
bool mIsMandatory = false;
std::vector<std::string> mNeedURLs;
bool mAddToNetwork = true;
public:
void setMandatory(bool m)
{
mIsMandatory = m;
}
bool isMandatory()
{
return mIsMandatory;
}
void setDepth(int d)
{
mDepth = d;
}
int getDepth() const
{
return mDepth;
}
u32 getConnectionCount()
{
return mConnectionCount;
}
u32 getHasRTCount()
{
return mHasRetweetCount;
}
u32 getTweetCount()
{
return mAllTweetCount;
}
u32 getWasRTCount()
{
return mWasRetweetCount;
}
TwitterAccount(RTNetwork* settings) : mSettings(settings)
{
}
bool needMoreTweets();
void addTweets(CoreItemSP json, bool addtofile);
void updateEmbeddedURLList(); // decode urls
void updateTweetRequestList(); // search quoted tweet author
void updateRetweeterList(); // search retweeters authors
void updateUserNameRequest();
void updateStats();
const std::vector < std::pair<u64, std::pair<u32, u32>> >& getSortedNetwork() const
{
return mSortedNetwork;
}
u32 getLinkCoef(TwitterAccount* other);
u32 getLinkCoef(u64 otherID);
u32 getHasRTCoef(u64 otherID);
u32 getWasRTCoef(u64 otherID);
bool needAddToNetwork()
{
return mAddToNetwork;
}
void setNeedAddToNetwork(bool toset)
{
mAddToNetwork = toset;
}
};
|
// src/application/models/user/UserProfile.ts
import User from '../../entities/User';
import { Gender } from '../common/Enum';
export default class UserProfile {
firstName: string;
lastName?: string;
email: string;
avatar?: string;
gender?: Gender;
birthday?: Date;
phone?: string;
address?: string;
culture?: string;
currency?: string;
constructor(data: User) {
this.firstName = data.firstName;
this.lastName = data.lastName;
this.email = data.email;
this.avatar = data.avatar;
this.gender = data.gender;
this.birthday = data.birthday;
this.phone = data.phone;
this.address = data.address;
this.culture = data.culture;
this.currency = data.currency;
}
};
|
# DiscordGIR/Bloo
import asyncio
import re
import discord
from discord.ext import commands
from utils.config import cfg
from utils.mod.filter import find_triggered_filters
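# Cog: when Discord fails to embed a twitter.com status link (and the message
# passes the word filter), reply with the same URL rewritten to fxtwitter.com.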
class TwitterFix(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if not message.guild:
return
if message.guild.id != cfg.guild_id:
return
if message.content is None:
return
tweet_link = re.search(r"https://twitter\.com/[a-z0-9_]{1,15}/status/[\d+]{15,}", message.content)
if tweet_link is None:
return
triggered_words = find_triggered_filters(
message.content, message.author)
if triggered_words:
return
await asyncio.sleep(1)
new_msg = await message.channel.fetch_message(message.id)
if new_msg.embeds:
return
link = tweet_link.group(0)
link = link.replace("twitter.com", "fxtwitter.com")
await message.reply(link, allowed_mentions=discord.AllowedMentions(everyone=False, users=False, roles=False), mention_author=False)
def setup(bot):
bot.add_cog(TwitterFix(bot)) |
#ifndef _MATH_UTILS_H
#define _MATH_UTILS_H
#include <eigen3/Eigen/Eigen>
#include <math.h>
//#include <code_utils/eigen_utils.h>
#include "../eigen_utils.h"
#define DEG2RAD ( M_PI / 180.0 )
#define RAD2DEG ( 180.0 / M_PI )
#define DEG2RADF ( float )( M_PI / 180.0 )
#define RAD2DEGF ( float )( 180.0 / M_PI )
namespace math_utils
{
/// \brief Skew
/// \param vec :input 3-dof vector
/// \return :output skew symmetric matrix
inline Eigen::Matrix3d
vectorToSkew( const Eigen::Vector3d vec )
{
Eigen::Matrix3d mat;
mat << 0.0, -vec( 2 ), vec( 1 ), vec( 2 ), 0.0, -vec( 0 ), -vec( 1 ), vec( 0 ), 0.0;
return mat;
}
template < typename T >
inline Eigen::Matrix< T, 3, 3 >
vectorToSkew( const Eigen::Matrix< T, 3, 1 > vec )
{
Eigen::Matrix< T, 3, 3 > mat;
mat( 0, 0 ) = T( 0 );
mat( 0, 1 ) = -vec( 2 );
mat( 0, 2 ) = vec( 1 );
mat( 1, 0 ) = vec( 2 );
mat( 1, 1 ) = T( 0 );
mat( 1, 2 ) = -vec( 0 );
mat( 2, 0 ) = -vec( 1 );
mat( 2, 1 ) = vec( 0 );
mat( 2, 2 ) = T( 0 );
return mat;
}
/// \brief Skew
/// \param vec :input 3-dof vector
/// \return :output skew symmetric matrix
inline Eigen::Vector3d
skewToVector( const Eigen::Matrix3d mat )
{
return Eigen::Vector3d( mat( 2, 1 ), mat( 0, 2 ), mat( 1, 0 ) );
}
/// \brief eigenQtoCeresQ
/// \param q_eigen :input quaternion in Eigen order (x, y, z, w)
/// \param q_ceres :output quaternion in Ceres order (w, x, y, z)
template < typename T >
inline void
eigenQtoCeresQ( const T* const q_eigen, T* q_ceres )
{
    // Eigen convention (x, y, z, w)
    // Ceres convention (w, x, y, z)
    q_ceres[0] = q_eigen[3];
    q_ceres[1] = q_eigen[0];
    q_ceres[2] = q_eigen[1];
    q_ceres[3] = q_eigen[2];
}
/// \brief ceresQtoEigenQ
/// \param q_ceres :input quaternion in Ceres order (w, x, y, z)
/// \param q_eigen :output quaternion in Eigen order (x, y, z, w)
template < typename T >
inline void
ceresQtoEigenQ( const T* const q_ceres, T* q_eigen )
{
    // Ceres convention (w, x, y, z)
    // Eigen convention (x, y, z, w)
    q_eigen[0] = q_ceres[1];
    q_eigen[1] = q_ceres[2];
    q_eigen[2] = q_ceres[3];
    q_eigen[3] = q_ceres[0];
}
/// \brief orientaionOfVectors
/// \param v
/// \param theta
/// \return
inline Eigen::Quaterniond
orientaionOfVectors( const Eigen::Vector3d v, const double theta )
{
return Eigen::Quaterniond( cos( 0.5 * theta ), sin( 0.5 * theta ) * v( 0 ), sin( 0.5 * theta ) * v( 1 ),
sin( 0.5 * theta ) * v( 2 ) );
}
/// \brief orientaionOfVectors :orientation between vector v1 and vector v2
/// \param v1 :begin vector
/// \param v2 :end vector
/// \return :q_12
inline Eigen::Quaterniond
orientaionOfVectors( const Eigen::Vector3d v1, const Eigen::Vector3d v2 )
{
double theta = acos( v1.dot( v2 ) );
Eigen::Vector3d Vk = v1.cross( v2 );
return Eigen::Quaterniond( cos( 0.5 * theta ), sin( 0.5 * theta ) * Vk( 0 ), sin( 0.5 * theta ) * Vk( 1 ),
sin( 0.5 * theta ) * Vk( 2 ) );
}
inline Eigen::Matrix3d
eularToDCM( double yaw, double pitch, double roll )
{
// double y = ypr(0) / 180.0 * M_PI;
// double p = ypr(1) / 180.0 * M_PI;
// double r = ypr(2) / 180.0 * M_PI;
Eigen::Matrix3d Rz;
Rz << cos( yaw ), -sin( yaw ), 0, sin( yaw ), cos( yaw ), 0, 0, 0, 1;
Eigen::Matrix3d Ry;
Ry << cos( pitch ), 0., sin( pitch ), 0., 1., 0., -sin( pitch ), 0., cos( pitch );
Eigen::Matrix3d Rx;
Rx << 1., 0., 0., 0., cos( roll ), -sin( roll ), 0., sin( roll ), cos( roll );
return Rz * Ry * Rx;
}
}
#endif // _MATH_UTILS_H
|
An old cardist once said, “think inside the box, everyone else is too busy trying to think outside of it.” At first read, this may seem like an attempt to stifle creativity since “out of the box” thinking is equated with creativity and is encouraged. However, this doesn’t necessarily mean that “in the box” thinking can’t be creative. The quote could be interpreted as inhibiting. Indeed, if you know the quoted cardist, that interpretation may seem like the correct one, but another more useful (and more correct) interpretation is this: look inside the box for things that are neglected. Whatever the meaning of the quote, it leads into the focus of this article, which is to explore, exploit, and employ basic moves as an engine of creativity.
Regarding Basic Moves
Before continuing, the definition of “basic moves” (for the purposes of this article) will be clarified. Basic moves are easy and simple, and uniqueness is a plus.
Easiness is rather self-explanatory— the move shouldn’t be difficult to execute perfectly on a consistent basis. If the move is easy, creating off of it will also be easy since you (should) have some mastery over it or you should be able to quickly develop proficiency.
Simplicity means that the move doesn’t have multiple components or a multitude of movements. Compare Kryptonite and the Cobra Cut to a Revolution Cut and a Charlier. The latter are obviously the basic moves since they are composed of relatively fewer movements and mechanics than the former.
In these aspects, creating with basic moves is more beneficial than with complex ones. Basic moves are easy to perform, break down, and explore. However, more complex moves could be used for these purposes as well if they are broken down into components that you want to explore. For example, examining the midway packet fold in Nikolaj’s Underscore rather than the cut as a whole.
Finally, Uniqueness: this trait refers not only to the way a move looks, but also (perhaps more importantly) to the finger, hand, arm, etc. positions it presents relative to the cards. Said positions are points from which you can branch off. More varied positions will lead to more distinct-looking outcomes. That being said, it would behoove you to seek out unfamiliar basic moves as they will provide you with new positions and new stepping stones. The Encyclopedia of Playing Card Flourishes by Jerry Cestkowski is a great resource for such moves, but contemporary sources are also useful (and easier to get a hold of). Now that you know what to look for, the important thing is to find and recognize all of the positions and intricacies that a single basic move or movement contains.
Dissecting Basic Moves
As noted, it’s important to be innately familiar with a move in order to get something new out of it. What follows are some ways to accomplish this.
Perform the move. This may seem needlessly obvious, especially when you have a good grasp of the move, but it can still be helpful by doing it in different ways. Going through the motions of a move slowly (about as slowly as possible) will reveal the full range of motions it presents for the cards and for your fingers. Another way to explore through performing is stopping the move halfway (or at any other point).
While performing the move you should constantly be noting the relationship between the position of cards and fingers. During the course of a move these positions are in constant flux and each change presents an opportunity to branch off or do something different with the move. Some positions are more useful than others — a great example of this is the used and overused Z-grip, which spawned (is spawning) a myriad of very unique moves. All the while, some other position or grip (like those offered in Jerry Cestkowski’s Gearscrew Cut) has faded into obscurity or been overlooked. That being said, it’s worth exploring non-current, non-comfy, and otherwise non-used moves before discounting them; they may not be the next Z-grip, but they may still present something really interesting.
There are a variety of ways to branch off of positions: you can do weird things, useless things, things that might not work, explore the range of motions that are allowed, etc. Think about the different movements you can make at certain point in a move. Get into the habit of asking yourself what you can do differently, what might flow well, or even what might feel comfortable (#comfycardistry) at a certain point in the move. This may seem vague and unclear, but the examples below should help to clarify.
Apart from studying the positions afforded by a move, you should also look at the move as a whole. For example, you may not find many useful positions in a certain move, but it may fit into another move in the form of an opener, closer, or other movement.
Note: When creating off of another move, don’t think that you’re simply creating a variation of a move, but envision the move as a component or a kick start to something new. In other words, try to go beyond making a variation and do something different.
Basic Move Success Stories
Many of my moves are modifications of basic moves or they arose while messing around with one. Below are a few examples of this along with the thought process behind them.
Piano Cut
This mechanical looking flip-flop running cut has humble origins in the posterchild of basic moves: the Charlier Cut (it’s a great move to use for a creativity exercise). The process of deriving the Piano Cut from the Charlier resembled this:
•Do a Charlier halfway. At this point, all fingers are pretty free, but the thumb, index, and pinky are in a particularly good spot to act on the cards.
•Lever the bottom pack down with the index finger. This levering motion came to mind since the set-up to Prism has a near identical movement, albeit with the pack in the other orientation. Here the thumb pack can drop and the cut can close, which is how the move stayed for a while until the next step was realized.
•Drop the thumb pack and lever with the pinky. This mirroring motion led to the completion of the cut. From here, the index pack can be dropped and then (surprise) be recycled with the motions of a Charlier cut. Ad infinitum.
Untitled
A nameless packet sliding, hand tilting, feel good move, which all started with a Scissor Cut. It came up after noticing that Scissor Cuts don’t open or close with the packets linear, i.e. from a side view, the thumb-index pack is at a slight angle relative to the index-pinky pack. By retaining this angle throughout the close, the index finger can easily push the thumb-index pack along the face of the other. After this initial slide, the pack ended up mirrored in the same angle, allowing another slide (with the available fingers). From here, I looked for ways to keep this packet sliding pattern going until a suitable closing position was reached.
Six Pack
This efficient packet-multiplying display was discovered while tinkering with corner brackets (see fig. 1.0), but it lacked an opener.
fig. 1.0
I needed a quick and simple method (maybe a basic method?) to align two packs next to each other at 90°. I was having trouble figuring out such an opener on my own. It’s a simple, but tricky problem to solve (feel free to explore ways to accomplish this, as another creativity exercise). I scrounged around the EOPCF for some answers and was reminded of Brian Tudor’s Revolution III when I came across Revolution cuts. Executing Revolution III halfway through aligns the packets and, with some help from the other hand, they end up ready to split.
Pincer Rotators
This packet reversing “cut” is the result of exploring the Pincer Grip Shuffle from the EOPCF. While in the exploration process I came across Jason Soll’s T3 cut, which has a very similar packet flipping mechanic. Combining this simple long-sided flip motion with the Pincer Grip Shuffle resulted in, for lack of a more creative name, Pincer Rotators.
So, Basically…
It seems fitting to end on another quote from another old cardist: “Trying to create a one-handed cut is not fucking easy.” There isn’t an obviously alternate way to interpret this one, nor should there be — I agree with it. Trying to create one-handed cuts or any kind of flourish is not easy. Besides just coming up with a move, you still have to iron out all the kinks, practice to proficiency, get feedback on it, repeat, and more. These are all crucial components to making a move, but their elaboration is another article. For now though, I hope this article has at least made the “come up with a move” part a less daunting task and inspired you to think inside the box of basic moves.
What did you think of the article? Do you have any such basic move success stories? Do you wish I listed more varied examples? Has this article helped you? Let me know in the comments! |
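# Decorator factory: wraps a "say"-style method so that long messages are sent
# in consecutive chunks of at most n characters.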
def saypager(n):
def decorator(say):
def f(self, msg, request=None, *args, **kwargs):
for i in range(0, len(msg), n):
say(self, msg=msg[i:i + n], request=request, *args, **kwargs)
return f
return decorator |
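# Map raw image metadata (fingerprint, aliases, properties, update_source) onto
# a ContainerImage; the fingerprint doubles as the name when no alias or
# description is available.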
def _to_image(self, metadata):
fingerprint = metadata.get('fingerprint')
aliases = metadata.get('aliases', [])
if aliases:
name = metadata.get('aliases')[0].get('name')
else:
name = metadata.get('properties', {}).get('description') \
or fingerprint
version = metadata.get('update_source', {}).get('alias')
extra = metadata
return ContainerImage(id=fingerprint, name=name, path=None,
version=version, driver=self, extra=extra) |
/**
* Visitor to fetch attribute values for CustomClassFromMap.
*/
private static class CustomMapDocumentVisitor implements DocumentVisitor<CustomClassFromMap> {
@Override
public CustomClassFromMap visitNull() { return null; }
@Override
public CustomClassFromMap visitBoolean(Boolean document) { return null; }
@Override
public CustomClassFromMap visitString(String document) { return null; }
@Override
public CustomClassFromMap visitNumber(SdkNumber document) { return null; }
@Override
public CustomClassFromMap visitMap(Map<String, Document> documentMap) {
CustomClassFromMap customClassFromMap = new CustomClassFromMap();
documentMap.entrySet().stream().forEach(stringDocumentEntry -> {
if ("innerStringField".equals(stringDocumentEntry.getKey())) {
customClassFromMap.setInnerStringField(stringDocumentEntry.getValue().accept(new StringDocumentVisitor()));
} else if ("innerIntField".equals(stringDocumentEntry.getKey())) {
customClassFromMap.setInnerIntField(stringDocumentEntry.getValue().accept(new NumberDocumentVisitor()).intValue());
}
});
return customClassFromMap;
}
@Override
public CustomClassFromMap visitList(List<Document> documentList) {
return null;
}
} |
// Request makes a (GET/POST/...) Requests to Beam REST API with JSON data.
// All the other Beam REST Calls in this file use this function.
func (s *Session) Request(method, urlStr string, data interface{}) (response []byte, err error) {
if s.Debug {
fmt.Println("API REQUEST PAYLOAD :: [" + fmt.Sprintf("%+v", data) + "]")
}
var body []byte
if data != nil {
body, err = json.Marshal(data)
if err != nil {
return
}
}
return s.request(method, urlStr, "application/json", body)
} |
// mkevenaar/chocolatey-vscode: src/ChocolateyCliManager.ts
import { window, QuickPickItem, workspace, Uri } from "vscode";
import { ChocolateyOperation } from "./ChocolateyOperation";
import * as path from "path";
import * as xml2js from "xml2js";
import * as fs from "fs";
import { getPathToChocolateyConfig, getPathToChocolateyTemplates } from "./config";
export class ChocolateyCliManager {
public new(uri: Uri | undefined): void {
window.showInputBox({
prompt: "Name for new Chocolatey Package?"
}).then((result) => {
if (!result || result === "") {
return;
}
let availableTemplates : Array<QuickPickItem> = this._findPackageTemplates().map((filepath) => {
return {
label: path.basename(filepath),
};
});
if (availableTemplates.length > 0) {
availableTemplates.unshift({label: "Default Template" });
window.showQuickPick(availableTemplates, {
placeHolder: "Available templates"
}).then(template => {
let chocoArguments: Array<string> = ["new", result];
if (template && template.label !== "Default Template") {
chocoArguments.push(`--template-name="'${template.label}'"`);
}
if (uri && this._isDirectory(uri.fsPath)) {
chocoArguments.push(`--output-directory="'${uri.fsPath}'"`)
}
let chocoProperties = readChocoProperties();
if (chocoProperties) {
for (let property of chocoProperties) {
chocoArguments.push(`"${property.key}=${property.value}"`);
}
}
let newOp: ChocolateyOperation = new ChocolateyOperation(chocoArguments);
newOp.run();
});
} else {
let newOp: ChocolateyOperation = new ChocolateyOperation(["new", result]);
newOp.run();
}
});
function readChocoProperties() {
const config = workspace.getConfiguration("chocolatey.commands.new");
let result = new Array<{key:string,value:string}>();
if (config === undefined) { return result;}
const properties = config.get<any>("properties");
if (properties === undefined) { return result; }
for (const key in properties) {
result.push({key: key, value: properties[key] });
}
return result;
}
}
public pack(): void {
workspace.findFiles("**/*.nuspec").then((nuspecFiles) => {
if(nuspecFiles.length ===0) {
window.showErrorMessage("There are no nuspec files in the current workspace.");
return;
}
let quickPickItems: Array<QuickPickItem> = nuspecFiles.map((filePath) => {
return {
label: path.basename(filePath.fsPath),
description: path.dirname(filePath.fsPath)
};
});
if(quickPickItems.length > 1) {
quickPickItems.unshift({label: "All nuspec files"});
}
window.showQuickPick(quickPickItems, {
placeHolder: "Available nuspec files..."
}).then((nuspecSelection) => {
if(!nuspecSelection) {
return;
}
window.showInputBox({
prompt: "Additional command arguments?"
}).then((additionalArguments) => {
if(nuspecSelection.label === "All nuspec files") {
quickPickItems.forEach((quickPickItem) => {
if(!additionalArguments || additionalArguments === "") {
additionalArguments = "";
}
if(quickPickItem.label === "All nuspec files") {
return;
}
let cwd: string = quickPickItem.description ? quickPickItem.description : "";
// tslint:disable-next-line:max-line-length
let packOp: ChocolateyOperation = new ChocolateyOperation(["pack", quickPickItem.label, additionalArguments], { isOutputChannelVisible: true, currentWorkingDirectory: cwd });
packOp.run();
});
} else {
if(!additionalArguments || additionalArguments === "") {
additionalArguments = "";
}
let cwd: string = nuspecSelection.description ? nuspecSelection.description : "";
// tslint:disable-next-line:max-line-length
let packOp: ChocolateyOperation = new ChocolateyOperation(["pack", nuspecSelection.label, additionalArguments], { isOutputChannelVisible: true, currentWorkingDirectory: cwd });
packOp.run();
}
});
});
});
}
public push(): void {
// tslint:disable-next-line:max-line-length
function pushPackage(packages: Array<QuickPickItem>, selectedNupkg: QuickPickItem, allPackages: boolean, source: string, apikey: string): void {
window.showInputBox({
prompt: "Additional command arguments?"
}).then((additionalArguments) => {
let chocolateyArguments: string[] = [];
if(source) {
chocolateyArguments.push("--source=\"'" + source + "'\"");
}
if(apikey) {
chocolateyArguments.push("--api-key=\"'" + apikey + "'\"");
}
if(!additionalArguments || additionalArguments === "") {
additionalArguments = "";
}
chocolateyArguments.push(additionalArguments);
if(allPackages) {
packages.forEach((packageToPush) => {
if(packageToPush.label === "All nupkg files") {
return;
}
let cwd: string = packageToPush.description ? packageToPush.description : "";
chocolateyArguments.unshift(packageToPush.label);
chocolateyArguments.unshift("push");
// tslint:disable-next-line:max-line-length
let pushOp: ChocolateyOperation = new ChocolateyOperation(chocolateyArguments, { isOutputChannelVisible: true, currentWorkingDirectory: cwd });
pushOp.run();
                            // remove the two arguments added above; they are replaced in the next iteration
                            chocolateyArguments.splice(0, 2);
});
} else {
let cwd: string = selectedNupkg.description ? selectedNupkg.description : "";
chocolateyArguments.unshift(selectedNupkg.label);
chocolateyArguments.unshift("push");
// tslint:disable-next-line:max-line-length
let pushOp: ChocolateyOperation = new ChocolateyOperation(chocolateyArguments, { isOutputChannelVisible: true, currentWorkingDirectory: cwd });
pushOp.run();
}
});
}
function getCustomSource(quickPickItems: Array<QuickPickItem>, nupkgSelection: QuickPickItem): void {
// need to get user to specify source
window.showInputBox({
prompt: "Source to push package(s) to..."
}).then((specifiedSource) => {
window.showInputBox({
prompt: "API Key for Source (if required)..."
}).then((specifiedApiKey) => {
if(!specifiedSource) {
return;
}
// tslint:disable-next-line:max-line-length
pushPackage(quickPickItems, nupkgSelection, nupkgSelection.label === "All nupkg files", specifiedSource, specifiedApiKey === undefined ? "" : specifiedApiKey);
});
});
}
workspace.findFiles("**/*.nupkg").then((nupkgFiles) => {
if(nupkgFiles.length ===0) {
window.showErrorMessage("There are no nupkg files in the current workspace.");
return;
}
let quickPickItems: Array<QuickPickItem> = nupkgFiles.map((filePath) => {
return {
label: path.basename(filePath.fsPath),
description: path.dirname(filePath.fsPath)
};
});
if(quickPickItems.length > 1) {
quickPickItems.unshift({label: "All nupkg files"});
}
window.showQuickPick(quickPickItems, {
placeHolder: "Available nupkg files..."
}).then((nupkgSelection) => {
if(!nupkgSelection) {
return;
}
let parser: xml2js.Parser = new xml2js.Parser();
const contents: string = fs.readFileSync(getPathToChocolateyConfig()).toString();
parser.parseString(contents, function(err: any, result: any): void {
if(err) {
console.log(err);
return;
}
let sourceQuickPickItems: Array<QuickPickItem> = new Array<QuickPickItem>();
if(result.chocolatey.apiKeys[0].apiKeys) {
result.chocolatey.apiKeys[0].apiKeys.forEach((apiKey => {
sourceQuickPickItems.push({
label: apiKey.$.source,
});
}));
}
if(sourceQuickPickItems.length === 0) {
getCustomSource(quickPickItems, nupkgSelection);
} else {
if(sourceQuickPickItems.length > 0) {
sourceQuickPickItems.unshift({label: "Use custom source..."});
}
window.showQuickPick(sourceQuickPickItems, {
placeHolder: "Select configured source..."
}).then((sourceSelection) => {
if(!sourceSelection || !sourceSelection.label) {
return;
}
if(sourceSelection.label === "Use custom source...") {
getCustomSource(quickPickItems, nupkgSelection);
} else {
// tslint:disable-next-line:max-line-length
pushPackage(quickPickItems, nupkgSelection, nupkgSelection.label === "All nupkg files", sourceSelection.label, "");
}
});
}
});
});
});
}
public installTemplates(): void {
const config: any = workspace.getConfiguration("chocolatey").templates;
let chocoArguments: Array<string> = ["install"];
config.names.forEach((name) => {
chocoArguments.push(name);
});
chocoArguments.push(`--source="'${config.source}'"`);
let installTemplatesOp: ChocolateyOperation = new ChocolateyOperation(chocoArguments);
installTemplatesOp.run();
}
public apikey(): void {
window.showInputBox({
prompt: "API Key..."
}).then((apiKey) => {
if(!apiKey) {
return;
}
window.showInputBox({
prompt: "Source..."
}).then((source) => {
if(!source) {
return;
}
let chocolateyArguments: string[] = [];
chocolateyArguments.push("-k=\"'" + apiKey + "'\"");
chocolateyArguments.push("-s=\"'" + source + "'\"");
let apiOp: ChocolateyOperation = new ChocolateyOperation(chocolateyArguments);
apiOp.run()
});
});
}
private _findPackageTemplates(): string[] {
let templateDir: string = getPathToChocolateyTemplates();
if (!templateDir || !fs.existsSync(templateDir) || !this._isDirectory(templateDir)) {
return [];
}
return fs.readdirSync(templateDir).map(name => path.join(templateDir, name)).filter(this._isDirectory);
}
private _isDirectory(path: string):boolean {
return fs.lstatSync(path).isDirectory();
}
}
|
/**
* Run detector and calculate the score whether micro stack pattern was followed.
* @param deploymentModel Deployment model to run the detector on.
* @return Return the computed score.
*/
@Override
public OutputMicroStackDetector calculateScore(DeploymentModel deploymentModel) {
Map<String, Set<RootComponent>> stacks = this.stackFinder.find(deploymentModel);
Map<String, Set<RootComponent>> services = this.serviceFinder.find(deploymentModel);
StackAnalyzer stackAnalyzer = new StackAnalyzer(services, stacks);
int numberOfMicroStacks = stackAnalyzer.getNumberOfMicroStacks();
double Score = getScore(numberOfMicroStacks, services);
OutputMicroStackDetector outputMicroStack = new OutputMicroStackDetector(stacks, services, numberOfMicroStacks, Score);
return outputMicroStack;
} |
/**
* This is the initial processing of an event in the queue.
*
* @return - true if the event could be processed, false otherwise.
* @throws IllegalArgumentException
*/
public boolean processEvent() throws IllegalArgumentException {
boolean result = false;
MsgEvent event = null;
synchronized (queue) {
event = queue.remove();
logger.debug(PC2LogCategory.FSM, name,
name + " QUEUE SIZE - " + queue.size());
}
if (event != null) {
msgQueueIndex = event.getMsgQueueIndex();
result = true;
String eventName = event.getEventName();
rpf.rcvd(event);
curState.processEvent(event);
changeStates(eventName);
}
return result;
} |
# Copyright 2015 <NAME> <<EMAIL>>
#
# This file is part of jvmlaunch. jvmlaunch is BSD-licensed software;
# for copyright information see the LICENSE file.
import os, logging
from pesky.settings import ConfigureError
class Target(object):
"""
"""
def __init__(self, name):
self.name = name
self.class_path = None
def configure(self, settings):
self.class_path = settings.get_str("class path", None)
def get_args(self):
if self.class_path is None:
return []
return ['-cp', self.class_path]
class TargetClass(Target):
"""
"""
def __init__(self, name):
Target.__init__(self, name)
self.main_class = None
def configure(self, settings):
Target.configure(self, settings)
logging.debug("configuring targetclass %s", self.name)
self.main_class = settings.get_str("main class", None)
if self.main_class is None:
raise ConfigureError("no main class specified")
def get_args(self):
        return Target.get_args(self) + [self.main_class]
class TargetJar(Target):
"""
"""
def __init__(self, name):
Target.__init__(self, name)
self.jar_file = None
def configure(self, settings):
Target.configure(self, settings)
logging.debug("configuring targetjar %s", self.name)
self.jar_file = settings.get_path("jar file", None)
if self.jar_file is None:
raise ConfigureError("no jar file specified")
if not os.path.isfile(self.jar_file):
raise ConfigureError("jar file %s doesn't exist" % self.jar_file)
def get_args(self):
return Target.get_args(self) + ['-jar', self.jar_file]
|
import * as assert from 'assert';
import { TestNode, TestList, ArrTestNode, initNewArrTestNode } from './fixture';
import { isMutating, changeComplete, isDataObject, Data } from '../../src';
describe('Lists', () => {
const MP_META_DATA = "ΔMd", MP_SELF = "ΔΔSelf";
@Data class DataList {
list: TestNode[];
}
@Data class NbrList {
list: number[];
}
it('should support the creation of List of Datasets', async function () {
const d = new DataList(), ls = d.list;
assert.equal(isDataObject(ls), true, "ls is a data object");
assert.equal(ls[0], null, "get null on undefined item");
assert.equal(ls.length, 0, "empty list after creation");
assert.equal(isMutating(ls), false, "list is not mutating after creation");
const nd1 = new TestNode();
ls[1] = nd1;
assert.equal(ls.length, 2, "length is 2");
assert.equal(isMutating(ls), true, "list is being changed after first set");
assert.equal(ls[1], nd1, "nd1 at position 1");
await changeComplete(ls);
assert.equal(isMutating(ls), false, "ls is now unchanged");
assert.equal(ls[1], nd1, "nd1 at position 1 in ls");
nd1.value = "v2";
assert.equal(isMutating(ls), true, "ls is changed again");
assert.equal(ls[1]!.value, "v2", "v2 ok");
await changeComplete(ls);
assert.equal(isMutating(ls), false, "ls unchanged again");
});
it('should support the creation of Lists of Numbers', async function () {
const nl = new NbrList(), ls = nl.list;
assert.equal(ls[0], null, "get null on undefined item");
assert.equal(ls.length, 0, "empty list after creation");
assert.equal(isMutating(ls), false, "list is not mutating after creation");
ls[1] = 18;
assert.equal(ls.length, 2, "length is 2");
assert.equal(isMutating(ls), true, "list is mutating after first set");
assert.equal(ls[1], 18, "18 at position 1");
await changeComplete(ls);
assert.equal(isMutating(ls), false, "ls is not mutating after creation");
assert.equal(ls[1], 18, "18 at position 1 in ls");
ls[1] = 19;
assert.equal(isMutating(ls), true, "ls is mutating after item update");
assert.equal(ls[1], 19, "get(1).value returns 19");
await changeComplete(ls);
assert.equal(isMutating(ls), false, "ls is not mutating after creation");
assert.equal(ls[1], 19, "get(1) is 19");
});
it('should accept an array to be set as a list', async function () {
const node = new TestList();
assert.deepEqual(node.list, [], "an empty list is created by default");
const arr = [
new TestNode(),
new TestNode()
]
arr[0].value = "a";
arr[1].value = "b";
node.list = arr;
assert.equal(isMutating(node), true, "node is mutating");
assert.equal(isMutating(node.list), true, "node.list is mutating");
assert.equal(node.list[0].value, "a", "value 0 is 'a'");
assert.equal(node.list[1].value, "b", "value 1 is 'b'");
await changeComplete(node);
assert.equal(isMutating(node), false, "node unchanged");
assert.equal(isMutating(node.list), false, "node.list unchanged");
assert.equal(node.list[0].value, "a", "value is still 'a'");
assert.equal(node.list[1].value, "b", "value 1 is still 'b'");
node.list[1].value = "c";
assert.equal(isMutating(node), true, "node touched");
assert.equal(isMutating(node.list[0]), false, "node.nodeList[0] touched");
assert.equal(isMutating(node.list[1]), true, "node.nodeList[1] touched");
assert.equal(node.list[1].value, "c", "value 1 is 'c'");
});
it('should properly update data lists: nothing -> sthV2 -> sthV3 -> null -> null', async function () {
const node = new ArrTestNode();
assert.equal(isMutating(node), false, "node unchanged");
const itemA = new TestNode();
node.list[0] = itemA;
itemA.value = "A";
assert.equal(isMutating(node), true, "node changed");
await changeComplete(node);
assert.equal(isMutating(node), false, "node unchanged (2)");
assert.equal(node.list[0]!.value, "A", "list[0].value is A");
assert.equal(node.list.length, 1, "node.list has only one item");
node.list[0]!.value = "A2";
await changeComplete(node);
assert.equal(node.list[0]!.value, "A2", "list[0].value is now A2");
node.list[0] = null;
await changeComplete(node);
assert.equal(node.list[0], null, "node list[0] is now null");
assert.equal(node.list.length, 1, "node list.length is still 1");
node.list[0] = null;
assert.equal(isMutating(node), false, "node still unchanged");
});
it('should support adding items', async function () {
const atn = new ArrTestNode();
assert.equal(atn.list.length, 0, "empty list");
let item = new TestNode();
atn.list[0] = item;
item.value = "item 0";
assert.equal(atn.list.length, 1, "1 item list");
assert.equal(atn.list[0]!.value, "item 0", "first item is item 0");
await changeComplete(atn);
assert.equal(atn.list.length, 1, "1 item list (2)");
assert.equal(atn.list[0]!.value, "item 0", "first item is item 0 (2)");
item = new TestNode();
atn.list[2] = item;
item.value = "item 2";
assert.equal(atn.list.length, 3, "3 items in list");
assert.equal(atn.list[2]!.value, "item 2", "3rd item is item 2");
});
it('should support List.push', async function () {
const node = new ArrTestNode();
let item: TestNode;
await changeComplete(node);
item = new TestNode();
item.value = "a";
assert.equal(node.list.length, 0, "empty list");
assert.equal(isMutating(node), false, "node not mutating");
node.list.push(item);
assert.equal(isMutating(node), true, "node now mutating");
assert.equal(node.list.length, 1, "one item in list");
assert.equal(isMutating(node), true, "node is mutating");
await changeComplete(node);
item = new TestNode();
item.value = "b";
assert.equal(node.list.length, 1, "one item in list");
assert.equal(node.list[0]!.value, "a", "value0 is a");
node.list.push(item);
assert.equal(node.list.length, 2, "two items in list");
assert.equal(isMutating(node), true, "node is mutating (2)");
await changeComplete(node);
assert.equal(node.list.length, 2, "two items in list (2)");
assert.equal(node.list[1]!.value, "b", "value1 is b");
});
it('should support List.splice', async function () {
function stringifyList(list) {
const arr: string[] = [];
for (let i = 0; list.length > i; i++) {
itm = list[i];
arr.push(itm ? itm.value : "null");
}
return arr.join("-");
}
const node = new ArrTestNode(),
list = node.list;
let itm: TestNode;
itm = list[0] = new TestNode();
itm.value = "i1";
itm = list[1] = new TestNode();
itm.value = "i2";
itm = list[3] = new TestNode();
itm.value = "i4";
await changeComplete(node);
assert.equal(stringifyList(node.list), "i1-i2-null-i4", "list original content");
assert.equal(isMutating(node), false, "no change on node");
node.list.splice(1, 2);
assert.equal(isMutating(node), true, "splice changed node");
await changeComplete(node);
assert.equal(stringifyList(node.list), "i1-i4", "node.list new content");
node.list.splice(1, 0, new TestNode()); // insert a new item
await changeComplete(node);
assert.equal(stringifyList(node.list), "i1-v1-i4", "node13.list content");
});
it('should support List.forEach', async function () {
const node = initNewArrTestNode();
await changeComplete(node);
const arr: string[] = [];
node.list.forEach((value, index, dList) => {
if (value) {
arr.push(value.value + "/" + index);
assert.equal(dList, node.list["ΔΔList"], "list is dList");
}
});
assert.equal(arr.join("-"), "i1/0-i2/1-i3/2", "forEach result");
assert.equal(isMutating(node), false, "node is unchanged");
const o = {
count: 0,
increment() {
this.count++;
}
}
node.list.forEach(o.increment, o);
assert.equal(o.count, 3, "forEach result with thisArg");
assert.equal(isMutating(node), false, "node is unchanged (2)");
});
TestNode.prototype.toString = function () {
return "TestNode " + this.value;
}
it('should support List.filter', async function () {
const node = initNewArrTestNode();
await changeComplete(node);
const ls = node.list.filter((item: TestNode, index) => {
return (item.value === "i1") || (index === 2);
});
assert.equal(ls.constructor, Array, "ls is an Array");
assert.equal(Array.isArray(ls), true, "Array.isArray(ls) is true");
assert.equal(ls.length, 2, "2 items in the list");
assert.equal(isMutating(node), false, "node11 is unchanged");
assert.equal(ls.join(','), "TestNode i1,TestNode i3", "ls content");
assert.equal((ls[0] as any)[MP_META_DATA].parents, node.list[MP_SELF], "list items still have 1 parent");
});
it('should support toString', async function () {
const nl = new NbrList(), ls = nl.list;
assert.equal(ls.toString(), "Trax List []", "empty list");
ls[0] = 123;
ls[1] = 234;
assert.equal(ls.toString(), "Trax List [123, 234]", "non-empty list");
});
it('should support List.indexOf', async function () {
const node = initNewArrTestNode();
await changeComplete(node);
const itm1 = node.list[1];
assert.equal(node.list.indexOf(itm1), 1, "itm1 index is 1");
});
it('should support list of lists', async function () {
@Data class LsNode {
list: TestNode[][];
}
const ls = new LsNode();
ls.list[0] = [];
const l00 = ls.list[0][0] = new TestNode();
assert.equal(ls.list[0][0].value, "v1", "default value 1");
assert.equal(isMutating(ls.list), true, "l is mutating");
l00.value = "item 00";
assert.equal(ls.list[0][0].value, "item 00", "first item can be retrieved");
await changeComplete(ls);
assert.equal(isMutating(ls.list), false, "l is mutating");
assert.equal(ls.list[0][0].value, "item 00", "first item can be retrieved (2)");
});
});
|
// Test reading a valid customized list of string object.
@Test
public void testGetObjectExceptionCustomized() throws IOException, ClassNotFoundException {
UtilProperties.setPropertyValueInMemory("SafeObjectInputStream", "allowList", "java.util.Arrays.ArrayList,java.lang.String");
testGetObjectExceptionSafe();
UtilProperties.setPropertyValueInMemory("SafeObjectInputStream", "allowList", "java.util.Arrays.ArrayList, java.lang.String");
testGetObjectExceptionSafe();
} |
import { Sequelize } from 'sequelize';
import { SubCategory } from '../types';
const Models = require('../models');
class Repositories {
private defaultAttributes = [Sequelize.literal(`"SubCategories".*`)];
/**
* Get all subcategories
* @param {SubCategory} where
* @param {Record<string, any>} attribute
* @param {Record<string, any>} options
* @returns {Promise<SubCategory[]>}
*/
get(where: SubCategory, attribute?: Record<string, any>, options?: Record<string, any>): Promise<SubCategory[]> {
return new Promise(async (resolve, reject) => {
try {
const attributes = attribute || this.defaultAttributes;
const subcategories = await Models.SubCategories.findAll({
attributes,
where,
raw: true,
...options,
});
resolve(subcategories);
} catch (error) {
reject(error);
}
});
}
/**
* Get one of category
* @param {SubCategory} where
* @param {Record<string, any>} attribute
* @param {Record<string, any>} options
* @returns {Promise<SubCategory>}
*/
getOne(where: SubCategory, attribute?: Record<string, any>, options?: Record<string, any>): Promise<SubCategory> {
return new Promise(async (resolve, reject) => {
try {
const attributes = attribute || this.defaultAttributes;
const category = await Models.SubCategories.findOne({
attributes,
where,
raw: true,
...options,
});
resolve(category);
} catch (error) {
reject(error);
}
});
}
/**
* Save one category
* @param {SubCategory} payload
* @param {Record<string, any>} options
* @returns {Promise<SubCategory>}
*/
save(payload: SubCategory, options?: Record<string, any>): Promise<SubCategory> {
return new Promise(async (resolve, reject) => {
try {
const category = await Models.SubCategories.create(
{
...payload,
},
{
returning: true,
raw: true,
...options,
},
);
resolve(category);
} catch (error) {
reject(error);
}
});
}
/**
* Update one category
* @param {SubCategory} payload
* @param {SubCategory} where
* @param {Record<string, any>} options
* @returns {Promise<boolean>}
*/
update(payload: SubCategory, where?: SubCategory, options?: Record<string, any>): Promise<boolean> {
return new Promise(async (resolve, reject) => {
try {
const category = await Models.SubCategories.update(
{
...payload,
updated_at: Sequelize.literal(`CURRENT_TIMESTAMP`),
},
{
where,
raw: true,
...options,
},
);
                    if (category[0] < 1) {
                        resolve(false);
                        return;
                    }
                    resolve(true);
} catch (error) {
reject(error);
}
});
}
/**
* Delete one category
* @param {SubCategory} where
* @param {Record<string, any>} options
* @returns {Promise<boolean>}
*/
delete(where?: SubCategory, options?: Record<string, any>): Promise<boolean> {
return new Promise(async (resolve, reject) => {
try {
                    // Sequelize destroy() resolves with the number of removed rows
                    const deletedCount = await Models.SubCategories.destroy({
                        where,
                        ...options,
                    });
                    if (deletedCount < 1) {
                        resolve(false);
                        return;
                    }
                    resolve(true);
} catch (error) {
reject(error);
}
});
}
}
export default new Repositories();
|
/* ========================================================================== */
/* === Include/Mongoose_IO.hpp ============================================== */
/* ========================================================================== */
/* -----------------------------------------------------------------------------
* Mongoose Graph Partitioning Library, Copyright (C) 2017-2018,
* Scott P. Kolodziej, Nuri S. Yeralan, Timothy A. Davis, William W. Hager
* Mongoose is licensed under Version 3 of the GNU General Public License.
* Mongoose is also available under other licenses; contact authors for details.
* SPDX-License-Identifier: GPL-3.0-only
* -------------------------------------------------------------------------- */
/**
* Simplified I/O functions for reading matrices and graphs
*
* For reading Matrix Market files into Mongoose, read_graph and read_matrix
* are provided (depending on if a Graph class instance or CSparse matrix
* instance is needed). The filename can be specified as either a const char*
* (easier for C programmers) or std::string (easier from C++).
*/
// #pragma once
#ifndef MONGOOSE_IO_HPP
#define MONGOOSE_IO_HPP
#include "Mongoose_CSparse.hpp"
#include "Mongoose_Graph.hpp"
#include "Mongoose_Internal.hpp"
#include <string>
extern "C"
{
#include "mmio.h"
}
namespace Mongoose
{
/**
* Generate a Graph from a Matrix Market file.
*
* Generate a Graph class instance from a Matrix Market file. The matrix
* contained in the file must be sparse, real, and square. If the matrix
* is not symmetric, it will be made symmetric with (A+A')/2. If the matrix has
* more than one connected component, the largest will be found and the rest
* discarded. If a diagonal is present, it will be removed.
*
* @param filename the filename or path to the Matrix Market File.
*/
Graph *read_graph(const std::string &filename);
/**
* Generate a CSparse matrix from a Matrix Market file.
*
* Generate a cs struct instance from a Matrix Market file. The matrix
* contained in the file must be sparse, real, and square. If the matrix
* is not symmetric, it will be made symmetric with (A+A')/2. If the matrix has
* more than one connected component, the largest will be found and the rest
* discarded. If a diagonal is present, it will be removed.
*
* @param filename the filename or path to the Matrix Market File.
* @param matcode the four character Matrix Market type code.
*/
cs *read_matrix(const std::string &filename, MM_typecode &matcode);
/**
* Generate a Graph from a Matrix Market file.
*
* Generate a Graph class instance from a Matrix Market file. The matrix
* contained in the file must be sparse, real, and square. If the matrix
* is not symmetric, it will be made symmetric with (A+A')/2. If the matrix has
* more than one connected component, the largest will be found and the rest
* discarded. If a diagonal is present, it will be removed.
*
* @param filename the filename or path to the Matrix Market File.
*/
Graph *read_graph(const char *filename);
/**
* Generate a CSparse matrix from a Matrix Market file.
*
* Generate a cs struct instance from a Matrix Market file. The matrix
* contained in the file must be sparse, real, and square. If the matrix
* is not symmetric, it will be made symmetric with (A+A')/2. If the matrix has
* more than one connected component, the largest will be found and the rest
* discarded. If a diagonal is present, it will be removed.
*
* @param filename the filename or path to the Matrix Market File.
* @param matcode the four character Matrix Market type code.
*/
cs *read_matrix(const char *filename, MM_typecode &matcode);
} // end namespace Mongoose
#endif
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package go_kafka_client
import (
"time"
"github.com/samuel/go-zookeeper/zk"
"sort"
"fmt"
"strconv"
"encoding/json"
"path"
"errors"
"strings"
)
var (
consumersPath = "/consumers"
brokerIdsPath = "/brokers/ids"
brokerTopicsPath = "/brokers/topics"
)
/* ZookeeperCoordinator implements ConsumerCoordinator interface and is used to coordinate multiple consumers that work within the same consumer group. */
type ZookeeperCoordinator struct {
config *ZookeeperConfig
zkConn *zk.Conn
unsubscribe chan bool
}
func (zc *ZookeeperCoordinator) String() string {
return "zk"
}
/* Creates a new ZookeeperCoordinator with a given configuration. */
func NewZookeeperCoordinator(Config *ZookeeperConfig) *ZookeeperCoordinator {
return &ZookeeperCoordinator{
config: Config,
unsubscribe: make(chan bool),
}
}
/* Establish connection to this ConsumerCoordinator. Returns an error if fails to connect, nil otherwise. */
func (zc *ZookeeperCoordinator) Connect() error {
Infof(zc, "Connecting to ZK at %s\n", zc.config.ZookeeperConnect)
conn, _, err := zk.Connect(zc.config.ZookeeperConnect, zc.config.ZookeeperTimeout)
zc.zkConn = conn
return err
}
/* Registers a new consumer with Consumerid id and TopicCount subscription that is a part of consumer group Groupid in this ConsumerCoordinator. Returns an error if registration failed, nil otherwise. */
func (zc *ZookeeperCoordinator) RegisterConsumer(Consumerid string, Groupid string, TopicCount TopicsToNumStreams) error {
Debugf(zc, "Trying to register consumer %s at group %s in Zookeeper", Consumerid, Groupid)
registryDir := newZKGroupDirs(Groupid).ConsumerRegistryDir
pathToConsumer := fmt.Sprintf("%s/%s", registryDir, Consumerid)
data, mappingError := json.Marshal(&ConsumerInfo{
Version : int16(1),
Subscription : TopicCount.GetTopicsToNumStreamsMap(),
Pattern : TopicCount.Pattern(),
Timestamp : time.Now().Unix(),
})
if mappingError != nil {
return mappingError
}
Debugf(zc, "Path: %s", pathToConsumer)
_, err := zc.zkConn.Create(pathToConsumer, data, zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
if err == zk.ErrNoNode {
err = zc.createOrUpdatePathParentMayNotExist(registryDir, make([]byte, 0))
if err != nil {
return err
}
_, err = zc.zkConn.Create(pathToConsumer, data, zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
	} else if err == zk.ErrNodeExists {
		var stat *zk.Stat
		_, stat, err = zc.zkConn.Get(pathToConsumer)
		if err != nil {
			return err
		}
		_, err = zc.zkConn.Set(pathToConsumer, data, stat.Version)
	}
return err
}
/* Deregisters consumer with Consumerid id that is a part of consumer group Groupid from this ConsumerCoordinator. Returns an error if deregistration failed, nil otherwise. */
func (zc *ZookeeperCoordinator) DeregisterConsumer(Consumerid string, Groupid string) error {
pathToConsumer := fmt.Sprintf("%s/%s", newZKGroupDirs(Groupid).ConsumerRegistryDir, Consumerid)
Debugf(zc, "Trying to deregister consumer at path: %s", pathToConsumer)
_, stat, err := zc.zkConn.Get(pathToConsumer)
if (err != nil) {
return err
}
return zc.zkConn.Delete(pathToConsumer, stat.Version)
}
// Gets the information about consumer with Consumerid id that is a part of consumer group Groupid from this ConsumerCoordinator.
// Returns ConsumerInfo on success and error otherwise (For example if consumer with given Consumerid does not exist).
func (zc *ZookeeperCoordinator) GetConsumerInfo(Consumerid string, Groupid string) (*ConsumerInfo, error) {
data, _, err := zc.zkConn.Get(fmt.Sprintf("%s/%s",
newZKGroupDirs(Groupid).ConsumerRegistryDir, Consumerid))
if (err != nil) {
return nil, err
}
	consumerInfo := &ConsumerInfo{}
	if err = json.Unmarshal(data, consumerInfo); err != nil {
		return nil, err
	}
	return consumerInfo, nil
}
// Gets the information about consumers per topic in consumer group Groupid excluding internal topics (such as offsets) if ExcludeInternalTopics = true.
// Returns a map where keys are topic names and values are slices of consumer ids and fetcher ids associated with this topic and error on failure.
func (zc *ZookeeperCoordinator) GetConsumersPerTopic(Groupid string, ExcludeInternalTopics bool) (map[string][]ConsumerThreadId, error) {
consumers, err := zc.GetConsumersInGroup(Groupid)
if (err != nil) {
return nil, err
}
consumersPerTopicMap := make(map[string][]ConsumerThreadId)
for _, consumer := range consumers {
topicsToNumStreams, err := NewTopicsToNumStreams(Groupid, consumer, zc, ExcludeInternalTopics)
if (err != nil) {
return nil, err
}
for topic, threadIds := range topicsToNumStreams.GetConsumerThreadIdsPerTopic() {
for _, threadId := range threadIds {
consumersPerTopicMap[topic] = append(consumersPerTopicMap[topic], threadId)
}
}
}
for topic := range consumersPerTopicMap {
sort.Sort(byName(consumersPerTopicMap[topic]))
}
return consumersPerTopicMap, nil
}
/* Gets the list of all consumer ids within a consumer group Groupid. Returns a slice containing all consumer ids in group and error on failure. */
func (zc *ZookeeperCoordinator) GetConsumersInGroup(Groupid string) ([]string, error) {
Debugf(zc, "Getting consumers in group %s", Groupid)
consumers, _, err := zc.zkConn.Children(newZKGroupDirs(Groupid).ConsumerRegistryDir)
if (err != nil) {
return nil, err
}
return consumers, nil
}
/* Gets the list of all topics registered in this ConsumerCoordinator. Returns a slice containing topic names and error on failure. */
func (zc *ZookeeperCoordinator) GetAllTopics() ([]string, error) {
topics, _, _, err := zc.zkConn.ChildrenW(brokerTopicsPath)
return topics, err
}
// Gets the information about existing partitions for a given Topics.
// Returns a map where keys are topic names and values are slices of partition ids associated with this topic and error on failure.
func (zc *ZookeeperCoordinator) GetPartitionsForTopics(Topics []string) (map[string][]int32, error) {
result := make(map[string][]int32)
partitionAssignments, err := zc.getPartitionAssignmentsForTopics(Topics)
if (err != nil) {
return nil, err
}
for topic, partitionAssignment := range partitionAssignments {
		for partition := range partitionAssignment {
result[topic] = append(result[topic], partition)
}
}
	for topic := range partitionAssignments {
sort.Sort(intArray(result[topic]))
}
return result, nil
}
// Gets the information about all Kafka brokers registered in this ConsumerCoordinator.
// Returns a slice of BrokerInfo and error on failure.
func (zc *ZookeeperCoordinator) GetAllBrokers() ([]*BrokerInfo, error) {
Debug(zc, "Getting all brokers in cluster")
brokerIds, _, err := zc.zkConn.Children(brokerIdsPath)
if (err != nil) {
return nil, err
}
brokers := make([]*BrokerInfo, len(brokerIds))
for i, brokerId := range brokerIds {
brokerIdNum, err := strconv.Atoi(brokerId)
if (err != nil) {
return nil, err
}
		brokers[i], err = zc.getBrokerInfo(int32(brokerIdNum))
		if (err != nil) {
			return nil, err
		}
		brokers[i].Id = int32(brokerIdNum)
}
return brokers, nil
}
// Gets the offset for a given TopicPartition and consumer group Groupid.
// Returns the offset on success, an error otherwise.
func (zc *ZookeeperCoordinator) GetOffsetForTopicPartition(Groupid string, TopicPartition *TopicAndPartition) (int64, error) {
dirs := newZKGroupTopicDirs(Groupid, TopicPartition.Topic)
offset, _, err := zc.zkConn.Get(fmt.Sprintf("%s/%d", dirs.ConsumerOffsetDir, TopicPartition.Partition))
if (err != nil) {
if (err == zk.ErrNoNode) {
return InvalidOffset, nil
} else {
return InvalidOffset, err
}
}
offsetNum, err := strconv.Atoi(string(offset))
if (err != nil) {
return InvalidOffset, err
}
return int64(offsetNum), nil
}
// Notifies the consumer group about a newly deployed topic, which should be consumed after the current one is exhausted
func (zc *ZookeeperCoordinator) NotifyConsumerGroup(Groupid string, ConsumerId string) error {
path := fmt.Sprintf("%s/%s-%d", newZKGroupDirs(Groupid).ConsumerChangesDir, ConsumerId, time.Now().Nanosecond())
Debugf(zc, "Sending notification to consumer group at %s", path)
return zc.createOrUpdatePathParentMayNotExist(path, make([]byte, 0))
}
// Removes notification notificationId for consumer group Groupid
func (zc *ZookeeperCoordinator) PurgeNotificationForGroup(Groupid string, notificationId string) error {
pathToDelete := fmt.Sprintf("%s/%s", newZKGroupDirs(Groupid).ConsumerChangesDir, notificationId)
_, stat, err := zc.zkConn.Get(pathToDelete)
if err != nil && err != zk.ErrNoNode {
return err
}
err = zc.zkConn.Delete(pathToDelete, stat.Version)
if err != nil && err != zk.ErrNoNode {
return err
}
return nil
}
// Subscribes for any change that should trigger consumer rebalance on consumer group Groupid in this ConsumerCoordinator.
// Returns a read-only channel of CoordinatorEvent values that receives a value on any significant coordinator event (e.g. a new consumer appeared, a new broker appeared etc.) and an error if it failed to subscribe.
func (zc *ZookeeperCoordinator) SubscribeForChanges(Groupid string) (<-chan CoordinatorEvent, error) {
changes := make(chan CoordinatorEvent)
zc.ensureZkPathsExist(Groupid)
Infof(zc, "Subscribing for changes for %s", Groupid)
consumersWatcher, err := zc.getConsumersInGroupWatcher(Groupid)
if err != nil {
return nil, err
}
consumerGroupChangesWatcher, err := zc.getConsumerGroupChangesWatcher(Groupid)
if err != nil {
return nil, err
}
topicsWatcher, err := zc.getTopicsWatcher()
if err != nil {
return nil, err
}
brokersWatcher, err := zc.getAllBrokersInClusterWatcher()
if err != nil {
return nil, err
}
inputChannels := make([]*<-chan zk.Event, 0)
inputChannels = append(inputChannels, &consumersWatcher, &consumerGroupChangesWatcher, &topicsWatcher, &brokersWatcher)
zkEvents := make(chan zk.Event)
stopRedirecting := redirectChannelsTo(inputChannels, zkEvents)
go func() {
for {
select {
case e := <-zkEvents: {
Trace(zc, e)
if e.State == zk.StateDisconnected {
Debug(zc, "ZK watcher session ended, reconnecting...")
consumersWatcher, err = zc.getConsumersInGroupWatcher(Groupid)
if err != nil {
panic(err)
}
consumerGroupChangesWatcher, err = zc.getConsumerGroupChangesWatcher(Groupid)
if err != nil {
panic(err)
}
topicsWatcher, err = zc.getTopicsWatcher()
if err != nil {
panic(err)
}
brokersWatcher, err = zc.getAllBrokersInClusterWatcher()
if err != nil {
panic(err)
}
} else {
emptyEvent := zk.Event{}
if e != emptyEvent {
if strings.HasPrefix(e.Path, newZKGroupDirs(Groupid).ConsumerChangesDir) {
changes <- NewTopicDeployed
} else {
changes <- Regular
}
} else {
//TODO configurable?
time.Sleep(2 * time.Second)
}
}
}
case <-zc.unsubscribe: {
stopRedirecting <- true
return
}
}
}
}()
return changes, nil
}
func (zc *ZookeeperCoordinator) GetNewDeployedTopics(Group string) (map[string]*DeployedTopics, error) {
changesPath := newZKGroupDirs(Group).ConsumerChangesDir
children, _, err := zc.zkConn.Children(changesPath)
if err != nil {
return nil, errors.New(fmt.Sprintf("Unable to get new deployed topics: %s", err.Error()))
}
deployedTopics := make(map[string]*DeployedTopics)
for _, child := range children {
entryPath := fmt.Sprintf("%s/%s", changesPath, child)
rawDeployedTopicsEntry, _, err := zc.zkConn.Get(entryPath)
if err != nil {
return nil, errors.New(fmt.Sprintf("Unable to fetch deployed topic entry %s: %s", entryPath, err.Error()))
}
deployedTopicsEntry := &DeployedTopics{}
err = json.Unmarshal(rawDeployedTopicsEntry, deployedTopicsEntry)
if err != nil {
return nil, errors.New(fmt.Sprintf("Unable to parse deployed topic entry %s: %s", rawDeployedTopicsEntry, err.Error()))
}
deployedTopics[child] = deployedTopicsEntry
}
return deployedTopics, nil
}
func (zc *ZookeeperCoordinator) DeployTopics(Group string, Topics DeployedTopics) error {
data, err := json.Marshal(Topics)
if err != nil {
return err
}
return zc.createOrUpdatePathParentMayNotExist(fmt.Sprintf("%s/%d", newZKGroupDirs(Group).ConsumerChangesDir, time.Now().Unix()), data)
}
/* Tells the ConsumerCoordinator to unsubscribe from events for the consumer it is associated with. */
func (zc *ZookeeperCoordinator) Unsubscribe() {
zc.unsubscribe <- true
}
// Tells the ConsumerCoordinator to claim ownership of partition Partition of topic Topic for fetcher consumerThreadId that works within consumer group Groupid.
// Returns true if claim is successful, false and error explaining failure otherwise.
func (zc *ZookeeperCoordinator) ClaimPartitionOwnership(Groupid string, Topic string, Partition int32, consumerThreadId ConsumerThreadId) (bool, error) {
var err error
for i := 0; i <= zc.config.MaxClaimPartitionRetries; i++ {
		var ok bool
		ok, err = zc.tryClaimPartitionOwnership(Groupid, Topic, Partition, consumerThreadId)
if ok {
return ok, err
}
Tracef(zc, "Claim failed for topic %s, partition %d after %d-th retry", Topic, Partition, i)
time.Sleep(zc.config.ClaimPartitionBackoff)
}
return false, err
}
func (zc *ZookeeperCoordinator) tryClaimPartitionOwnership(group string, topic string, partition int32, consumerThreadId ConsumerThreadId) (bool, error) {
dirs := newZKGroupTopicDirs(group, topic)
zc.createOrUpdatePathParentMayNotExist(dirs.ConsumerOwnerDir, make([]byte, 0))
pathToOwn := fmt.Sprintf("%s/%d", dirs.ConsumerOwnerDir, partition)
_, err := zc.zkConn.Create(pathToOwn, []byte(consumerThreadId.String()), zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
if err == zk.ErrNoNode {
err = zc.createOrUpdatePathParentMayNotExist(dirs.ConsumerOwnerDir, make([]byte, 0))
if err != nil {
return false, err
}
_, err = zc.zkConn.Create(pathToOwn, []byte(consumerThreadId.String()), zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
}
if (err != nil) {
if (err == zk.ErrNodeExists) {
Debugf(consumerThreadId, "waiting for the partition ownership to be deleted: %d", partition)
return false, nil
} else {
Error(consumerThreadId, err)
return false, err
}
}
Debugf(zc, "Successfully claimed partition %d in topic %s for %s", partition, topic, consumerThreadId)
return true, nil
}
// Tells the ConsumerCoordinator to release partition ownership on topic Topic and partition Partition for consumer group Groupid.
// Returns an error if it failed to release partition ownership.
func (zc *ZookeeperCoordinator) ReleasePartitionOwnership(Groupid string, Topic string, Partition int32) error {
err := zc.deletePartitionOwnership(Groupid, Topic, Partition)
if (err != nil) {
if err == zk.ErrNoNode {
Warn(zc, err)
return nil
} else {
return err
}
}
return nil
}
// Tells the ConsumerCoordinator to commit offset Offset for topic and partition TopicPartition for consumer group Groupid.
// Returns error if failed to commit offset.
func (zc *ZookeeperCoordinator) CommitOffset(Groupid string, TopicPartition *TopicAndPartition, Offset int64) error {
dirs := newZKGroupTopicDirs(Groupid, TopicPartition.Topic)
return zc.createOrUpdatePathParentMayNotExist(fmt.Sprintf("%s/%d", dirs.ConsumerOffsetDir, TopicPartition.Partition), []byte(strconv.FormatInt(Offset, 10)))
}
func (zc *ZookeeperCoordinator) ensureZkPathsExist(group string) {
dirs := newZKGroupDirs(group)
zc.createOrUpdatePathParentMayNotExist(dirs.ConsumerDir, make([]byte, 0))
zc.createOrUpdatePathParentMayNotExist(dirs.ConsumerGroupDir, make([]byte, 0))
zc.createOrUpdatePathParentMayNotExist(dirs.ConsumerRegistryDir, make([]byte, 0))
zc.createOrUpdatePathParentMayNotExist(dirs.ConsumerChangesDir, make([]byte, 0))
}
func (zc *ZookeeperCoordinator) getAllBrokersInClusterWatcher() (<- chan zk.Event, error) {
Debug(zc, "Subscribing for events from broker registry")
_, _, watcher, err := zc.zkConn.ChildrenW(brokerIdsPath)
if (err != nil) {
return nil, err
}
return watcher, nil
}
func (zc *ZookeeperCoordinator) getConsumersInGroupWatcher(group string) (<- chan zk.Event, error) {
Debugf(zc, "Getting consumer watcher for group %s", group)
_, _, watcher, err := zc.zkConn.ChildrenW(newZKGroupDirs(group).ConsumerRegistryDir)
if (err != nil) {
return nil, err
}
return watcher, nil
}
func (zc *ZookeeperCoordinator) getConsumerGroupChangesWatcher(group string) (<- chan zk.Event, error) {
Debugf(zc, "Getting watcher for consumer group '%s' changes", group)
_, _, watcher, err := zc.zkConn.ChildrenW(newZKGroupDirs(group).ConsumerChangesDir)
if (err != nil) {
return nil, err
}
return watcher, nil
}
func (zc *ZookeeperCoordinator) getTopicsWatcher() (watcher <- chan zk.Event, err error) {
_, _, watcher, err = zc.zkConn.ChildrenW(brokerTopicsPath)
return
}
func (zc *ZookeeperCoordinator) getBrokerInfo(brokerId int32) (*BrokerInfo, error) {
Debugf(zc, "Getting info for broker %d", brokerId)
pathToBroker := fmt.Sprintf("%s/%d", brokerIdsPath, brokerId)
data, _, zkError := zc.zkConn.Get(pathToBroker)
if (zkError != nil) {
return nil, zkError
}
broker := &BrokerInfo{}
mappingError := json.Unmarshal([]byte(data), broker)
return broker, mappingError
}
func (zc *ZookeeperCoordinator) getPartitionAssignmentsForTopics(topics []string) (map[string]map[int32][]int32, error) {
Debugf(zc, "Trying to get partition assignments for topics %v", topics)
result := make(map[string]map[int32][]int32)
for _, topic := range topics {
topicInfo, err := zc.getTopicInfo(topic)
if (err != nil) {
return nil, err
}
result[topic] = make(map[int32][]int32)
for partition, replicaIds := range topicInfo.Partitions {
partitionInt, err := strconv.Atoi(partition)
if (err != nil) {
return nil, err
}
result[topic][int32(partitionInt)] = replicaIds
}
}
return result, nil
}
func (zc *ZookeeperCoordinator) getTopicInfo(topic string) (*TopicInfo, error) {
data, _, err := zc.zkConn.Get(fmt.Sprintf("%s/%s", brokerTopicsPath, topic))
if (err != nil) {
return nil, err
}
topicInfo := &TopicInfo{}
err = json.Unmarshal(data, topicInfo)
if (err != nil) {
return nil, err
}
return topicInfo, nil
}
func (zc *ZookeeperCoordinator) createOrUpdatePathParentMayNotExist(pathToCreate string, data []byte) error {
Debugf(zc, "Trying to create path %s in Zookeeper", pathToCreate)
_, err := zc.zkConn.Create(pathToCreate, data, 0, zk.WorldACL(zk.PermAll))
if (err != nil) {
if (zk.ErrNodeExists == err) {
if (len(data) > 0) {
Debugf(zc, "Trying to update existing node %s", pathToCreate)
return zc.updateRecord(pathToCreate, data)
} else {
return nil
}
} else {
parent, _ := path.Split(pathToCreate)
err = zc.createOrUpdatePathParentMayNotExist(parent[:len(parent)-1], make([]byte, 0))
if (err != nil) {
if zk.ErrNodeExists != err { Error(zc, err.Error()) }
return err
} else {
Debugf(zc, "Successfully created path %s", parent[:len(parent)-1])
}
Debugf(zc, "Trying again to create path %s in Zookeeper", pathToCreate)
_, err = zc.zkConn.Create(pathToCreate, data, 0, zk.WorldACL(zk.PermAll))
}
}
return err
}
func (zc *ZookeeperCoordinator) deletePartitionOwnership(group string, topic string, partition int32) error {
pathToDelete := fmt.Sprintf("%s/%d", newZKGroupTopicDirs(group, topic).ConsumerOwnerDir, partition)
_, stat, err := zc.zkConn.Get(pathToDelete)
if (err != nil) {
return err
}
err = zc.zkConn.Delete(pathToDelete, stat.Version)
if (err != nil) {
return err
}
return nil
}
func (zc *ZookeeperCoordinator) updateRecord(pathToCreate string, dataToWrite []byte) error {
Debugf(zc, "Trying to update path %s", pathToCreate)
_, stat, _ := zc.zkConn.Get(pathToCreate)
_, err := zc.zkConn.Set(pathToCreate, dataToWrite, stat.Version)
return err
}
/* ZookeeperConfig is used to pass multiple configuration entries to ZookeeperCoordinator. */
type ZookeeperConfig struct {
/* Zookeeper hosts */
ZookeeperConnect []string
/* Zookeeper read timeout */
ZookeeperTimeout time.Duration
/* Max retries to claim one partition */
MaxClaimPartitionRetries int
/* Backoff to retry to claim partition */
ClaimPartitionBackoff time.Duration
}
/* Creates a new ZookeeperConfig with sane defaults. Default ZookeeperConnect points to localhost. */
func NewZookeeperConfig() *ZookeeperConfig {
config := &ZookeeperConfig{}
config.ZookeeperConnect = []string{"localhost"}
config.ZookeeperTimeout = 1*time.Second
config.MaxClaimPartitionRetries = 3
config.ClaimPartitionBackoff = 150 * time.Millisecond
return config
}
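// A minimal usage sketch, assuming a reachable Zookeeper ensemble; the address
// "localhost:2181" below is illustrative and is not configured anywhere in this package:
//
//	config := NewZookeeperConfig()
//	config.ZookeeperConnect = []string{"localhost:2181"}
//	coordinator := NewZookeeperCoordinator(config)
//	if err := coordinator.Connect(); err != nil {
//		// handle the connection error
//	}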
type zkGroupDirs struct {
Group string
ConsumerDir string
ConsumerGroupDir string
ConsumerRegistryDir string
ConsumerChangesDir string
ConsumerSyncDir string
}
func newZKGroupDirs(group string) *zkGroupDirs {
consumerGroupDir := fmt.Sprintf("%s/%s", consumersPath, group)
consumerRegistryDir := fmt.Sprintf("%s/ids", consumerGroupDir)
consumerChangesDir := fmt.Sprintf("%s/changes", consumerGroupDir)
consumerSyncDir := fmt.Sprintf("%s/sync", consumerGroupDir)
return &zkGroupDirs {
Group: group,
ConsumerDir: consumersPath,
ConsumerGroupDir: consumerGroupDir,
ConsumerRegistryDir: consumerRegistryDir,
ConsumerChangesDir: consumerChangesDir,
ConsumerSyncDir: consumerSyncDir,
}
}
type zkGroupTopicDirs struct {
ZkGroupDirs *zkGroupDirs
Topic string
ConsumerOffsetDir string
ConsumerOwnerDir string
}
func newZKGroupTopicDirs(group string, topic string) *zkGroupTopicDirs {
zkGroupsDirs := newZKGroupDirs(group)
return &zkGroupTopicDirs {
ZkGroupDirs: zkGroupsDirs,
Topic: topic,
ConsumerOffsetDir: fmt.Sprintf("%s/%s/%s", zkGroupsDirs.ConsumerGroupDir, "offsets", topic),
ConsumerOwnerDir: fmt.Sprintf("%s/%s/%s", zkGroupsDirs.ConsumerGroupDir, "owners", topic),
}
}
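// For illustration, newZKGroupTopicDirs("my-group", "my-topic") (hypothetical group
// and topic names) produces the following znode paths:
//
//	ConsumerOffsetDir: /consumers/my-group/offsets/my-topic
//	ConsumerOwnerDir:  /consumers/my-group/owners/my-topic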
//used for tests only
type mockZookeeperCoordinator struct {
commitHistory map[TopicAndPartition]int64
}
func newMockZookeeperCoordinator() *mockZookeeperCoordinator {
return &mockZookeeperCoordinator{
commitHistory: make(map[TopicAndPartition]int64),
}
}
func (mzk *mockZookeeperCoordinator) Connect() error { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) RegisterConsumer(consumerid string, group string, topicCount TopicsToNumStreams) error { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) DeregisterConsumer(consumerid string, group string) error { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) GetConsumerInfo(consumerid string, group string) (*ConsumerInfo, error) { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) GetConsumersPerTopic(group string, excludeInternalTopics bool) (map[string][]ConsumerThreadId, error) { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) GetConsumersInGroup(group string) ([]string, error) { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) GetAllTopics() ([]string, error) { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) GetPartitionsForTopics(topics []string) (map[string][]int32, error) { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) GetAllBrokers() ([]*BrokerInfo, error) { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) GetOffsetForTopicPartition(group string, topicPartition *TopicAndPartition) (int64, error) { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) NotifyConsumerGroup(group string, consumerId string) error { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) PurgeNotificationForGroup(Group string, notificationId string) error { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) SubscribeForChanges(group string) (<-chan CoordinatorEvent, error) { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) GetNewDeployedTopics(Group string) (map[string]*DeployedTopics, error) { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) Unsubscribe() { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) ClaimPartitionOwnership(group string, topic string, partition int32, consumerThreadId ConsumerThreadId) (bool, error) { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) ReleasePartitionOwnership(group string, topic string, partition int32) error { panic("Not implemented") }
func (mzk *mockZookeeperCoordinator) CommitOffset(group string, topicPartition *TopicAndPartition, offset int64) error {
mzk.commitHistory[*topicPartition] = offset
return nil
}
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
import sys
class TestClip(Testing.vtkTest):
def testImage2DScalar(self):
planes = ['XY', 'XZ', 'YZ']
expectedNCells = [38, 46, 42]
for plane, nCells in zip(planes,expectedNCells):
r = vtk.vtkRTAnalyticSource()
r.SetXFreq(600);
r.SetYFreq(400);
r.SetZFreq(900);
if plane == 'XY':
r.SetWholeExtent(-5, 5, -5, 5, 0, 0)
elif plane == 'XZ':
r.SetWholeExtent(-5, 5, 0, 0, -5, 5)
else:
r.SetWholeExtent(0, 0, -5, 5, -5, 5)
r.Update()
c = vtk.vtkTableBasedClipDataSet()
c.SetInputConnection(r.GetOutputPort())
c.SetUseValueAsOffset(0)
c.SetValue(150)
c.SetInsideOut(1)
c.Update()
self.assertEqual(c.GetOutput().GetNumberOfCells(), nCells)
def testImage(self):
r = vtk.vtkRTAnalyticSource()
r.SetWholeExtent(-5, 5, -5, 5, -5, 5)
r.Update()
s = vtk.vtkSphere()
s.SetRadius(2)
s.SetCenter(0,0,0)
c = vtk.vtkTableBasedClipDataSet()
c.SetInputConnection(r.GetOutputPort())
c.SetClipFunction(s)
c.SetInsideOut(1)
c.Update()
self.assertEqual(c.GetOutput().GetNumberOfCells(), 64)
def testRectilinear(self):
rt = vtk.vtkRTAnalyticSource()
rt.SetWholeExtent(-5, 5, -5, 5, -5, 5)
rt.Update()
i = rt.GetOutput()
r = vtk.vtkRectilinearGrid()
dims = i.GetDimensions()
r.SetDimensions(dims)
exts = i.GetExtent()
orgs = i.GetOrigin()
xs = vtk.vtkFloatArray()
xs.SetNumberOfTuples(dims[0])
for d in range(dims[0]):
xs.SetTuple1(d, orgs[0] + exts[0] + d)
r.SetXCoordinates(xs)
ys = vtk.vtkFloatArray()
ys.SetNumberOfTuples(dims[1])
for d in range(dims[1]):
ys.SetTuple1(d, orgs[1] + exts[2] + d)
r.SetYCoordinates(ys)
zs = vtk.vtkFloatArray()
zs.SetNumberOfTuples(dims[2])
for d in range(dims[2]):
zs.SetTuple1(d, orgs[2] + exts[4] + d)
r.SetZCoordinates(zs)
s = vtk.vtkSphere()
s.SetRadius(2)
s.SetCenter(0,0,0)
c = vtk.vtkTableBasedClipDataSet()
c.SetInputData(r)
c.SetClipFunction(s)
c.SetInsideOut(1)
c.Update()
self.assertEqual(c.GetOutput().GetNumberOfCells(), 64)
def testStructured2D(self):
planes = ['XY', 'XZ', 'YZ']
expectedNCells = [42, 34, 68]
for plane, nCells in zip(planes,expectedNCells):
rt = vtk.vtkRTAnalyticSource()
if plane == 'XY':
rt.SetWholeExtent(-5, 5, -5, 5, 0, 0)
elif plane == 'XZ':
rt.SetWholeExtent(-5, 5, 0, 0, -5, 5)
else:
rt.SetWholeExtent(0, 0, -5, 5, -5, 5)
rt.Update()
i = rt.GetOutput()
st = vtk.vtkStructuredGrid()
st.SetDimensions(i.GetDimensions())
nps = i.GetNumberOfPoints()
ps = vtk.vtkPoints()
ps.SetNumberOfPoints(nps)
for idx in range(nps):
ps.SetPoint(idx, i.GetPoint(idx))
st.SetPoints(ps)
cyl = vtk.vtkCylinder()
cyl.SetRadius(2)
cyl.SetCenter(0,0,0)
transform = vtk.vtkTransform()
transform.RotateWXYZ(45,20,1,10)
cyl.SetTransform(transform)
c = vtk.vtkTableBasedClipDataSet()
c.SetInputData(st)
c.SetClipFunction(cyl)
c.SetInsideOut(1)
c.Update()
self.assertEqual(c.GetOutput().GetNumberOfCells(), nCells)
def testStructured(self):
rt = vtk.vtkRTAnalyticSource()
rt.SetWholeExtent(-5, 5, -5, 5, -5, 5)
rt.Update()
i = rt.GetOutput()
st = vtk.vtkStructuredGrid()
st.SetDimensions(i.GetDimensions())
nps = i.GetNumberOfPoints()
ps = vtk.vtkPoints()
ps.SetNumberOfPoints(nps)
for idx in range(nps):
ps.SetPoint(idx, i.GetPoint(idx))
st.SetPoints(ps)
s = vtk.vtkSphere()
s.SetRadius(2)
s.SetCenter(0,0,0)
c = vtk.vtkTableBasedClipDataSet()
c.SetInputData(st)
c.SetClipFunction(s)
c.SetInsideOut(1)
c.Update()
self.assertEqual(c.GetOutput().GetNumberOfCells(), 64)
def testUnstructured(self):
rt = vtk.vtkRTAnalyticSource()
rt.SetWholeExtent(-5, 5, -5, 5, -5, 5)
t = vtk.vtkThreshold()
t.SetInputConnection(rt.GetOutputPort())
t.ThresholdByUpper(-10)
s = vtk.vtkSphere()
s.SetRadius(2)
s.SetCenter(0,0,0)
c = vtk.vtkTableBasedClipDataSet()
c.SetInputConnection(t.GetOutputPort())
c.SetClipFunction(s)
c.SetInsideOut(1)
c.Update()
self.assertEqual(c.GetOutput().GetNumberOfCells(), 64)
eg = vtk.vtkEnSightGoldReader()
eg.SetCaseFileName(VTK_DATA_ROOT + "/Data/EnSight/elements.case")
eg.Update()
pl = vtk.vtkPlane()
pl.SetOrigin(3.5, 3.5, 0.5)
pl.SetNormal(0, 0, 1)
c.SetInputConnection(eg.GetOutputPort())
c.SetClipFunction(pl)
c.SetInsideOut(1)
c.Update()
data = c.GetOutputDataObject(0).GetBlock(0)
self.assertEqual(data.GetNumberOfCells(), 75)
rw = vtk.vtkRenderWindow()
ren = vtk.vtkRenderer()
rw.AddRenderer(ren)
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(data)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren.AddActor(actor)
ac = ren.GetActiveCamera()
ac.SetPosition(-7.9, 9.7, 14.6)
ac.SetFocalPoint(3.5, 3.5, 0.5)
ac.SetViewUp(0.08, 0.93, -0.34)
rw.Render()
ren.ResetCameraClippingRange()
rtTester = vtk.vtkTesting()
for arg in sys.argv[1:]:
rtTester.AddArgument(arg)
rtTester.AddArgument("-V")
rtTester.AddArgument("tableBasedClip.png")
rtTester.SetRenderWindow(rw)
rw.Render()
rtResult = rtTester.RegressionTest(10)
if __name__ == "__main__":
Testing.main([(TestClip, 'test')])
|
/**
 * Build a PasswordRecoverDTO for successfully notifying password recovery details.
 *
 * @param notificationChannel Notified channel
 * @param confirmationCode Confirmation code to confirm the recovery
 * @param resendCode Code to resend the recovery confirmation code
* @return PasswordRecoverDTO object
*/
private PasswordRecoverDTO buildPasswordRecoveryResponseDTO(String notificationChannel, String confirmationCode,
String resendCode) {
PasswordRecoverDTO passwordRecoverDTO = new PasswordRecoverDTO();
passwordRecoverDTO.setNotificationChannel(notificationChannel);
if (NotificationChannels.EXTERNAL_CHANNEL.getChannelType().equals(notificationChannel)) {
passwordRecoverDTO.setConfirmationCode(confirmationCode);
}
passwordRecoverDTO.setResendCode(resendCode);
passwordRecoverDTO.setCode(
IdentityRecoveryConstants.SuccessEvents.SUCCESS_STATUS_CODE_PASSWORD_RECOVERY_INTERNALLY_NOTIFIED
.getCode());
passwordRecoverDTO.setMessage(
IdentityRecoveryConstants.SuccessEvents.SUCCESS_STATUS_CODE_PASSWORD_RECOVERY_INTERNALLY_NOTIFIED
.getMessage());
return passwordRecoverDTO;
} |
A Brezis-Nirenberg result for non-local critical equations in low dimension
The present paper is devoted to the study of the following non-local fractional equation involving critical nonlinearities
\begin{eqnarray}
(-\Delta)^s u-\lambda u=|u|^{2^*-2}u \quad \mbox{in } \Omega, \\
u=0 \quad \mbox{in } \mathbb{R}^n\setminus \Omega,
\end{eqnarray}
where $s\in (0,1)$ is fixed, $(-\Delta )^s$ is the fractional Laplace operator, $\lambda$ is a positive parameter, $2^*$ is the fractional critical Sobolev exponent and $\Omega$ is an open bounded subset of $\mathbb{R}^n$, $n>2s$, with Lipschitz boundary. In recent papers we investigated the existence of non-trivial solutions for this problem when $\Omega$ is an open bounded subset of $\mathbb{R}^n$ with $n\geq 4s$ and, in this framework, we proved some existence results.
The aim of this paper is to complete the investigation carried out in , by considering the case when $2s < n < 4s$. In this context, we prove an existence theorem for our problem, which may be seen as a Brezis-Nirenberg type result in low dimension.
In particular, when $s=1$ (and consequently $n=3$) our result is the classical one obtained by Brezis and Nirenberg in the famous paper .
In this sense the present work may be considered as an extension
of some classical results for the Laplacian to the case of non-local fractional operators.
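Throughout, $2^*$ denotes the usual fractional critical Sobolev exponent, namely
\begin{equation*}
2^*=\frac{2n}{n-2s}\,, \qquad n>2s\,,
\end{equation*}
so that, for $s=1$, the low-dimensional range $2s<n<4s$ reduces to the single case $n=3$.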
import pytest
from recipes.tests.share import create_recipes
from users.tests.share import create_user_api
pytestmark = [pytest.mark.django_db]
URL = '/api/users/subscriptions/'
RESPONSE_KEYS = (
'id',
'email',
'username',
'first_name',
'last_name',
'is_subscribed',
'recipes',
'recipes_count',
)
RECIPE_FIELDS = ('id', 'name', 'image', 'cooking_time')
PAGINATION_PARAMS = ('count', 'next', 'previous', 'results')
def test_ok(as_anon, as_user, as_admin, admin, ingredients, tags):
another_user = create_user_api(as_anon)
create_recipes(as_admin, ingredients, tags)
as_user.get(
f'/api/users/{another_user.id}/subscribe/',
expected_status=201,
)
as_user.get(f'/api/users/{admin.id}/subscribe/', expected_status=201)
got = as_user.get(URL)
assert tuple(got.keys()) == PAGINATION_PARAMS
assert tuple(got['results'][0].keys()) == RESPONSE_KEYS
assert got['count'] == 2
results = got['results']
assert admin.email == results[0]['email']
assert another_user.email == results[1]['email']
assert results[0]['is_subscribed']
assert results[1]['is_subscribed']
assert len(results[0]['recipes']) == 2
assert len(results[1]['recipes']) == 0
assert tuple(results[0]['recipes'][0].keys()) == RECIPE_FIELDS
def test_recipes_limit_recipes_count(
as_user,
as_admin,
admin,
ingredients,
tags,
):
create_recipes(as_admin, ingredients, tags)
as_user.get(f'/api/users/{admin.id}/subscribe/', expected_status=201)
got = as_user.get(URL, {'recipes_limit': 1})
assert len(got['results'][0]['recipes']) == 1
assert got['results'][0]['recipes_count'] == 2
def test_anon(as_anon):
as_anon.get(URL, expected_status=401)
|
/* Generated by RuntimeBrowser
Image: /System/Library/PrivateFrameworks/PhotoLibraryServices.framework/PhotoLibraryServices
*/
@interface PLResourceDataStoreOptions : NSObject {
bool _assumeNoExistingResources;
bool _baseKeyOnExistingData;
}
@property (nonatomic) bool assumeNoExistingResources;
@property (nonatomic) bool baseKeyOnExistingData;
- (bool)assumeNoExistingResources;
- (bool)baseKeyOnExistingData;
- (void)setAssumeNoExistingResources:(bool)arg1;
- (void)setBaseKeyOnExistingData:(bool)arg1;
@end
|
/**
* Contains the set of parsed entities from the document.
*/
public static class EntitySet {
private HashMap<String, Entity> entcache = new HashMap<String, Entity>();
void addEntity(final Entity ent) {
entcache.put(ent.getId(), ent);
}
boolean containsEntity(final String type, final String name) {
return entcache.containsKey(Entity.entityId(type, name));
}
Entity createEntity(final String type, final String name) {
final Entity ent = new Entity();
ent.setType(type);
ent.setName(name);
ent.set = this;
addEntity(ent);
return ent;
}
Entity getOrCreateEntity(final String type, final String name) {
if (containsEntity(type, name)) {
return entcache.get(Entity.entityId(type, name));
}
return createEntity(type, name);
}
/**
* Return the collection of entities.
*
* @return the entities.
*/
public Collection<Entity> getEntities() {
return entcache.values();
}
} |
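// Usage sketch (illustrative; the type "person" and name "Alice" are hypothetical values):
//
//     EntitySet entities = new EntitySet();
//     Entity alice = entities.getOrCreateEntity("person", "Alice");
//     // a second lookup with the same type and name returns the cached instance
//     Entity same = entities.getOrCreateEntity("person", "Alice");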
/*
===========================================================================
Doom 3 GPL Source Code
Copyright (C) 1999-2011 id Software LLC, a ZeniMax Media company.
This file is part of the Doom 3 GPL Source Code ("Doom 3 Source Code").
Doom 3 Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
/*
** WIN_GLIMP.C
**
** This file contains ALL Win32 specific stuff having to do with the
** OpenGL refresh. When a port is being made the following functions
** must be implemented by the port:
**
** GLimp_SwapBuffers
** GLimp_Init
** GLimp_Shutdown
** GLimp_SetGamma
**
** Note that the GLW_xxx functions are Windows specific GL-subsystem
** related functions that are relevant ONLY to win_glimp.c
*/
#include "../../idlib/precompiled.h"
#pragma hdrstop
#include "win_local.h"
#include "rc/AFEditor_resource.h"
#include "rc/doom_resource.h"
#include "../../renderer/tr_local.h"
static void GLW_InitExtensions( void );
// WGL_ARB_extensions_string
PFNWGLGETEXTENSIONSSTRINGARBPROC wglGetExtensionsStringARB;
// WGL_EXT_swap_interval
PFNWGLSWAPINTERVALEXTPROC wglSwapIntervalEXT;
// WGL_ARB_pixel_format
PFNWGLGETPIXELFORMATATTRIBIVARBPROC wglGetPixelFormatAttribivARB;
PFNWGLGETPIXELFORMATATTRIBFVARBPROC wglGetPixelFormatAttribfvARB;
PFNWGLCHOOSEPIXELFORMATARBPROC wglChoosePixelFormatARB;
// WGL_ARB_pbuffer
PFNWGLCREATEPBUFFERARBPROC wglCreatePbufferARB;
PFNWGLGETPBUFFERDCARBPROC wglGetPbufferDCARB;
PFNWGLRELEASEPBUFFERDCARBPROC wglReleasePbufferDCARB;
PFNWGLDESTROYPBUFFERARBPROC wglDestroyPbufferARB;
PFNWGLQUERYPBUFFERARBPROC wglQueryPbufferARB;
// WGL_ARB_render_texture
PFNWGLBINDTEXIMAGEARBPROC wglBindTexImageARB;
PFNWGLRELEASETEXIMAGEARBPROC wglReleaseTexImageARB;
PFNWGLSETPBUFFERATTRIBARBPROC wglSetPbufferAttribARB;
/* ARB_pixel_format */
#define WGL_NUMBER_PIXEL_FORMATS_ARB 0x2000
#define WGL_DRAW_TO_WINDOW_ARB 0x2001
#define WGL_DRAW_TO_BITMAP_ARB 0x2002
#define WGL_ACCELERATION_ARB 0x2003
#define WGL_NEED_PALETTE_ARB 0x2004
#define WGL_NEED_SYSTEM_PALETTE_ARB 0x2005
#define WGL_SWAP_LAYER_BUFFERS_ARB 0x2006
#define WGL_SWAP_METHOD_ARB 0x2007
#define WGL_NUMBER_OVERLAYS_ARB 0x2008
#define WGL_NUMBER_UNDERLAYS_ARB 0x2009
#define WGL_TRANSPARENT_ARB 0x200A
#define WGL_SHARE_DEPTH_ARB 0x200C
#define WGL_SHARE_STENCIL_ARB 0x200D
#define WGL_SHARE_ACCUM_ARB 0x200E
#define WGL_SUPPORT_GDI_ARB 0x200F
#define WGL_SUPPORT_OPENGL_ARB 0x2010
#define WGL_DOUBLE_BUFFER_ARB 0x2011
#define WGL_STEREO_ARB 0x2012
#define WGL_PIXEL_TYPE_ARB 0x2013
#define WGL_COLOR_BITS_ARB 0x2014
#define WGL_RED_BITS_ARB 0x2015
#define WGL_RED_SHIFT_ARB 0x2016
#define WGL_GREEN_BITS_ARB 0x2017
#define WGL_GREEN_SHIFT_ARB 0x2018
#define WGL_BLUE_BITS_ARB 0x2019
#define WGL_BLUE_SHIFT_ARB 0x201A
#define WGL_ALPHA_BITS_ARB 0x201B
#define WGL_ALPHA_SHIFT_ARB 0x201C
#define WGL_ACCUM_BITS_ARB 0x201D
#define WGL_ACCUM_RED_BITS_ARB 0x201E
#define WGL_ACCUM_GREEN_BITS_ARB 0x201F
#define WGL_ACCUM_BLUE_BITS_ARB 0x2020
#define WGL_ACCUM_ALPHA_BITS_ARB 0x2021
#define WGL_DEPTH_BITS_ARB 0x2022
#define WGL_STENCIL_BITS_ARB 0x2023
#define WGL_AUX_BUFFERS_ARB 0x2024
#define WGL_NO_ACCELERATION_ARB 0x2025
#define WGL_GENERIC_ACCELERATION_ARB 0x2026
#define WGL_FULL_ACCELERATION_ARB 0x2027
#define WGL_SWAP_EXCHANGE_ARB 0x2028
#define WGL_SWAP_COPY_ARB 0x2029
#define WGL_SWAP_UNDEFINED_ARB 0x202A
#define WGL_TYPE_RGBA_ARB 0x202B
#define WGL_TYPE_COLORINDEX_ARB 0x202C
#define WGL_TRANSPARENT_RED_VALUE_ARB 0x2037
#define WGL_TRANSPARENT_GREEN_VALUE_ARB 0x2038
#define WGL_TRANSPARENT_BLUE_VALUE_ARB 0x2039
#define WGL_TRANSPARENT_ALPHA_VALUE_ARB 0x203A
#define WGL_TRANSPARENT_INDEX_VALUE_ARB 0x203B
/* ARB_multisample */
#define WGL_SAMPLE_BUFFERS_ARB 0x2041
#define WGL_SAMPLES_ARB 0x2042
//
// function declaration
//
bool QGL_Init( const char *dllname );
void QGL_Shutdown( void );
/*
========================
GLimp_SaveGamma
========================
*/
static void GLimp_SaveGamma( void ) {
HDC hDC;
BOOL success;
hDC = GetDC( GetDesktopWindow() );
success = GetDeviceGammaRamp( hDC, win32.oldHardwareGamma );
common->DPrintf( "...getting default gamma ramp: %s\n", success ? "success" : "failed" );
ReleaseDC( GetDesktopWindow(), hDC );
}
/*
========================
GLimp_RestoreGamma
========================
*/
static void GLimp_RestoreGamma( void ) {
HDC hDC;
BOOL success;
// if we never read in a reasonable looking
// table, don't write it out
if ( win32.oldHardwareGamma[0][255] == 0 ) {
return;
}
hDC = GetDC( GetDesktopWindow() );
success = SetDeviceGammaRamp( hDC, win32.oldHardwareGamma );
common->DPrintf ( "...restoring hardware gamma: %s\n", success ? "success" : "failed" );
ReleaseDC( GetDesktopWindow(), hDC );
}
/*
========================
GLimp_SetGamma
The renderer calls this when the user adjusts r_gamma or r_brightness
========================
*/
void GLimp_SetGamma( unsigned short red[256], unsigned short green[256], unsigned short blue[256] ) {
unsigned short table[3][256];
int i;
if ( !win32.hDC ) {
return;
}
for ( i = 0; i < 256; i++ ) {
table[0][i] = red[i];
table[1][i] = green[i];
table[2][i] = blue[i];
}
if ( !SetDeviceGammaRamp( win32.hDC, table ) ) {
common->Printf( "WARNING: SetDeviceGammaRamp failed.\n" );
}
}
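/*
Illustrative caller-side sketch (not part of this file): building a 256-entry
ramp from a gamma value g and handing it to GLimp_SetGamma. The variable names
and the pow-based ramp are hypothetical, shown only to clarify the expected
table layout.

	unsigned short ramp[3][256];
	for ( int i = 0; i < 256; i++ ) {
		int v = (int)( 65535.0f * pow( i / 255.0f, 1.0f / g ) );
		ramp[0][i] = ramp[1][i] = ramp[2][i] = (unsigned short)v;
	}
	GLimp_SetGamma( ramp[0], ramp[1], ramp[2] );
*/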
/*
=============================================================================
WglExtension Grabbing
This is gross -- creating a window just to get a context to get the wgl extensions
=============================================================================
*/
/*
====================
FakeWndProc
Only used to get wglExtensions
====================
*/
LONG WINAPI FakeWndProc (
HWND hWnd,
UINT uMsg,
WPARAM wParam,
LPARAM lParam) {
if ( uMsg == WM_DESTROY ) {
PostQuitMessage(0);
}
if ( uMsg != WM_CREATE ) {
return DefWindowProc(hWnd, uMsg, wParam, lParam);
}
const static PIXELFORMATDESCRIPTOR pfd = {
sizeof(PIXELFORMATDESCRIPTOR),
1,
PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,
PFD_TYPE_RGBA,
24,
0, 0, 0, 0, 0, 0,
8, 0,
0, 0, 0, 0,
24, 8,
0,
PFD_MAIN_PLANE,
0,
0,
0,
0,
};
int pixelFormat;
HDC hDC;
HGLRC hGLRC;
hDC = GetDC(hWnd);
// Set up OpenGL
pixelFormat = ChoosePixelFormat(hDC, &pfd);
SetPixelFormat(hDC, pixelFormat, &pfd);
hGLRC = qwglCreateContext(hDC);
qwglMakeCurrent(hDC, hGLRC);
// free things
wglMakeCurrent(NULL, NULL);
wglDeleteContext(hGLRC);
ReleaseDC(hWnd, hDC);
return DefWindowProc(hWnd, uMsg, wParam, lParam);
}
/*
==================
GLW_CheckWGLExtensions
==================
*/
void GLW_CheckWGLExtensions( HDC hDC ) {
wglGetExtensionsStringARB = (PFNWGLGETEXTENSIONSSTRINGARBPROC)
GLimp_ExtensionPointer("wglGetExtensionsStringARB");
if ( wglGetExtensionsStringARB ) {
glConfig.wgl_extensions_string = (const char *) wglGetExtensionsStringARB(hDC);
} else {
glConfig.wgl_extensions_string = "";
}
// WGL_EXT_swap_control
wglSwapIntervalEXT = (PFNWGLSWAPINTERVALEXTPROC) GLimp_ExtensionPointer( "wglSwapIntervalEXT" );
r_swapInterval.SetModified(); // force a set next frame
// WGL_ARB_pixel_format
wglGetPixelFormatAttribivARB = (PFNWGLGETPIXELFORMATATTRIBIVARBPROC)GLimp_ExtensionPointer("wglGetPixelFormatAttribivARB");
wglGetPixelFormatAttribfvARB = (PFNWGLGETPIXELFORMATATTRIBFVARBPROC)GLimp_ExtensionPointer("wglGetPixelFormatAttribfvARB");
wglChoosePixelFormatARB = (PFNWGLCHOOSEPIXELFORMATARBPROC)GLimp_ExtensionPointer("wglChoosePixelFormatARB");
// WGL_ARB_pbuffer
wglCreatePbufferARB = (PFNWGLCREATEPBUFFERARBPROC)GLimp_ExtensionPointer("wglCreatePbufferARB");
wglGetPbufferDCARB = (PFNWGLGETPBUFFERDCARBPROC)GLimp_ExtensionPointer("wglGetPbufferDCARB");
wglReleasePbufferDCARB = (PFNWGLRELEASEPBUFFERDCARBPROC)GLimp_ExtensionPointer("wglReleasePbufferDCARB");
wglDestroyPbufferARB = (PFNWGLDESTROYPBUFFERARBPROC)GLimp_ExtensionPointer("wglDestroyPbufferARB");
wglQueryPbufferARB = (PFNWGLQUERYPBUFFERARBPROC)GLimp_ExtensionPointer("wglQueryPbufferARB");
// WGL_ARB_render_texture
wglBindTexImageARB = (PFNWGLBINDTEXIMAGEARBPROC)GLimp_ExtensionPointer("wglBindTexImageARB");
wglReleaseTexImageARB = (PFNWGLRELEASETEXIMAGEARBPROC)GLimp_ExtensionPointer("wglReleaseTexImageARB");
wglSetPbufferAttribARB = (PFNWGLSETPBUFFERATTRIBARBPROC)GLimp_ExtensionPointer("wglSetPbufferAttribARB");
}
/*
==================
GLW_GetWGLExtensionsWithFakeWindow
==================
*/
static void GLW_GetWGLExtensionsWithFakeWindow( void ) {
HWND hWnd;
MSG msg;
// Create a window for the sole purpose of getting
	// a valid context to get the wgl extensions
hWnd = CreateWindow(WIN32_FAKE_WINDOW_CLASS_NAME, GAME_NAME,
WS_OVERLAPPEDWINDOW,
40, 40,
640,
480,
NULL, NULL, win32.hInstance, NULL );
if ( !hWnd ) {
common->FatalError( "GLW_GetWGLExtensionsWithFakeWindow: Couldn't create fake window" );
}
HDC hDC = GetDC( hWnd );
HGLRC gRC = wglCreateContext( hDC );
wglMakeCurrent( hDC, gRC );
GLW_CheckWGLExtensions( hDC );
wglDeleteContext( gRC );
ReleaseDC( hWnd, hDC );
DestroyWindow( hWnd );
while ( GetMessage( &msg, NULL, 0, 0 ) ) {
TranslateMessage( &msg );
DispatchMessage( &msg );
}
}
//=============================================================================
/*
====================
GLW_WM_CREATE
====================
*/
void GLW_WM_CREATE( HWND hWnd ) {
}
/*
====================
GLW_InitDriver
Set the pixelformat for the window before it is
shown, and create the rendering context
====================
*/
static bool GLW_InitDriver( glimpParms_t parms ) {
PIXELFORMATDESCRIPTOR src =
{
sizeof(PIXELFORMATDESCRIPTOR), // size of this pfd
1, // version number
PFD_DRAW_TO_WINDOW | // support window
PFD_SUPPORT_OPENGL | // support OpenGL
PFD_DOUBLEBUFFER, // double buffered
PFD_TYPE_RGBA, // RGBA type
32, // 32-bit color depth
0, 0, 0, 0, 0, 0, // color bits ignored
8, // 8 bit destination alpha
0, // shift bit ignored
0, // no accumulation buffer
0, 0, 0, 0, // accum bits ignored
24, // 24-bit z-buffer
8, // 8-bit stencil buffer
0, // no auxiliary buffer
PFD_MAIN_PLANE, // main layer
0, // reserved
0, 0, 0 // layer masks ignored
};
common->Printf( "Initializing OpenGL driver\n" );
//
// get a DC for our window if we don't already have one allocated
//
if ( win32.hDC == NULL ) {
common->Printf( "...getting DC: " );
if ( ( win32.hDC = GetDC( win32.hWnd ) ) == NULL ) {
common->Printf( "^3failed^0\n" );
return false;
}
common->Printf( "succeeded\n" );
}
// the multisample path uses the wgl
if ( wglChoosePixelFormatARB && parms.multiSamples > 1 ) {
int iAttributes[20];
FLOAT fAttributes[] = {0, 0};
UINT numFormats;
// FIXME: specify all the other stuff
iAttributes[0] = WGL_SAMPLE_BUFFERS_ARB;
iAttributes[1] = 1;
iAttributes[2] = WGL_SAMPLES_ARB;
iAttributes[3] = parms.multiSamples;
iAttributes[4] = WGL_DOUBLE_BUFFER_ARB;
iAttributes[5] = TRUE;
iAttributes[6] = WGL_STENCIL_BITS_ARB;
iAttributes[7] = 8;
iAttributes[8] = WGL_DEPTH_BITS_ARB;
iAttributes[9] = 24;
iAttributes[10] = WGL_RED_BITS_ARB;
iAttributes[11] = 8;
iAttributes[12] = WGL_BLUE_BITS_ARB;
iAttributes[13] = 8;
iAttributes[14] = WGL_GREEN_BITS_ARB;
iAttributes[15] = 8;
iAttributes[16] = WGL_ALPHA_BITS_ARB;
iAttributes[17] = 8;
iAttributes[18] = 0;
iAttributes[19] = 0;
wglChoosePixelFormatARB( win32.hDC, iAttributes, fAttributes, 1, &win32.pixelformat, &numFormats );
} else {
// this is the "classic" choose pixel format path
// eventually we may need to have more fallbacks, but for
// now, ask for everything
if ( parms.stereo ) {
common->Printf( "...attempting to use stereo\n" );
src.dwFlags |= PFD_STEREO;
}
//
// choose, set, and describe our desired pixel format. If we're
// using a minidriver then we need to bypass the GDI functions,
// otherwise use the GDI functions.
//
if ( ( win32.pixelformat = ChoosePixelFormat( win32.hDC, &src ) ) == 0 ) {
common->Printf( "...^3GLW_ChoosePFD failed^0\n");
return false;
}
common->Printf( "...PIXELFORMAT %d selected\n", win32.pixelformat );
}
// get the full info
DescribePixelFormat( win32.hDC, win32.pixelformat, sizeof( win32.pfd ), &win32.pfd );
glConfig.colorBits = win32.pfd.cColorBits;
glConfig.depthBits = win32.pfd.cDepthBits;
glConfig.stencilBits = win32.pfd.cStencilBits;
// XP seems to set this incorrectly
if ( !glConfig.stencilBits ) {
glConfig.stencilBits = 8;
}
// the same SetPixelFormat is used either way
if ( SetPixelFormat( win32.hDC, win32.pixelformat, &win32.pfd ) == FALSE ) {
common->Printf( "...^3SetPixelFormat failed^0\n", win32.hDC );
return false;
}
//
// startup the OpenGL subsystem by creating a context and making it current
//
common->Printf( "...creating GL context: " );
if ( ( win32.hGLRC = qwglCreateContext( win32.hDC ) ) == 0 ) {
common->Printf( "^3failed^0\n" );
return false;
}
common->Printf( "succeeded\n" );
common->Printf( "...making context current: " );
if ( !qwglMakeCurrent( win32.hDC, win32.hGLRC ) ) {
qwglDeleteContext( win32.hGLRC );
win32.hGLRC = NULL;
common->Printf( "^3failed^0\n" );
return false;
}
common->Printf( "succeeded\n" );
return true;
}
/*
====================
GLW_CreateWindowClasses
====================
*/
static void GLW_CreateWindowClasses( void ) {
WNDCLASS wc;
//
// register the window class if necessary
//
if ( win32.windowClassRegistered ) {
return;
}
memset( &wc, 0, sizeof( wc ) );
wc.style = 0;
wc.lpfnWndProc = (WNDPROC) MainWndProc;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.hInstance = win32.hInstance;
wc.hIcon = LoadIcon( win32.hInstance, MAKEINTRESOURCE(IDI_ICON1));
wc.hCursor = LoadCursor (NULL,IDC_ARROW);
wc.hbrBackground = (struct HBRUSH__ *)COLOR_GRAYTEXT;
wc.lpszMenuName = 0;
wc.lpszClassName = WIN32_WINDOW_CLASS_NAME;
if ( !RegisterClass( &wc ) ) {
common->FatalError( "GLW_CreateWindow: could not register window class" );
}
common->Printf( "...registered window class\n" );
// now register the fake window class that is only used
// to get wgl extensions
wc.style = 0;
wc.lpfnWndProc = (WNDPROC) FakeWndProc;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.hInstance = win32.hInstance;
wc.hIcon = LoadIcon( win32.hInstance, MAKEINTRESOURCE(IDI_ICON1));
wc.hCursor = LoadCursor (NULL,IDC_ARROW);
wc.hbrBackground = (struct HBRUSH__ *)COLOR_GRAYTEXT;
wc.lpszMenuName = 0;
wc.lpszClassName = WIN32_FAKE_WINDOW_CLASS_NAME;
if ( !RegisterClass( &wc ) ) {
common->FatalError( "GLW_CreateWindow: could not register window class" );
}
common->Printf( "...registered fake window class\n" );
win32.windowClassRegistered = true;
}
/*
=======================
GLW_CreateWindow
Responsible for creating the Win32 window.
If cdsFullscreen is true, it won't have a border
=======================
*/
static bool GLW_CreateWindow( glimpParms_t parms ) {
int stylebits;
int x, y, w, h;
int exstyle;
//
// compute width and height
//
if ( parms.fullScreen ) {
exstyle = WS_EX_TOPMOST;
stylebits = WS_POPUP|WS_VISIBLE|WS_SYSMENU;
x = 0;
y = 0;
w = parms.width;
h = parms.height;
} else {
RECT r;
// adjust width and height for window border
r.bottom = parms.height;
r.left = 0;
r.top = 0;
r.right = parms.width;
exstyle = 0;
stylebits = WINDOW_STYLE|WS_SYSMENU;
AdjustWindowRect (&r, stylebits, FALSE);
w = r.right - r.left;
h = r.bottom - r.top;
x = win32.win_xpos.GetInteger();
y = win32.win_ypos.GetInteger();
// adjust window coordinates if necessary
// so that the window is completely on screen
if ( x + w > win32.desktopWidth ) {
x = ( win32.desktopWidth - w );
}
if ( y + h > win32.desktopHeight ) {
y = ( win32.desktopHeight - h );
}
if ( x < 0 ) {
x = 0;
}
if ( y < 0 ) {
y = 0;
}
}
win32.hWnd = CreateWindowEx (
exstyle,
WIN32_WINDOW_CLASS_NAME,
GAME_NAME,
stylebits,
x, y, w, h,
NULL,
NULL,
win32.hInstance,
NULL);
if ( !win32.hWnd ) {
common->Printf( "^3GLW_CreateWindow() - Couldn't create window^0\n" );
return false;
}
::SetTimer( win32.hWnd, 0, 100, NULL );
ShowWindow( win32.hWnd, SW_SHOW );
UpdateWindow( win32.hWnd );
common->Printf( "...created window @ %d,%d (%dx%d)\n", x, y, w, h );
if ( !GLW_InitDriver( parms ) ) {
ShowWindow( win32.hWnd, SW_HIDE );
DestroyWindow( win32.hWnd );
win32.hWnd = NULL;
return false;
}
SetForegroundWindow( win32.hWnd );
SetFocus( win32.hWnd );
glConfig.isFullscreen = parms.fullScreen;
return true;
}
static void PrintCDSError( int value ) {
switch ( value ) {
case DISP_CHANGE_RESTART:
common->Printf( "restart required\n" );
break;
case DISP_CHANGE_BADPARAM:
common->Printf( "bad param\n" );
break;
case DISP_CHANGE_BADFLAGS:
common->Printf( "bad flags\n" );
break;
case DISP_CHANGE_FAILED:
common->Printf( "DISP_CHANGE_FAILED\n" );
break;
case DISP_CHANGE_BADMODE:
common->Printf( "bad mode\n" );
break;
case DISP_CHANGE_NOTUPDATED:
common->Printf( "not updated\n" );
break;
default:
common->Printf( "unknown error %d\n", value );
break;
}
}
/*
===================
GLW_SetFullScreen
===================
*/
static bool GLW_SetFullScreen( glimpParms_t parms ) {
#if 0
// for some reason, bounds checker claims that windows is
// writing past the bounds of dm in the get display frequency call
union {
DEVMODE dm;
byte filler[1024];
} hack;
#endif
DEVMODE dm;
int cdsRet;
DEVMODE devmode;
int modeNum;
bool matched;
// first make sure the user is not trying to select a mode that his card/monitor can't handle
matched = false;
for ( modeNum = 0 ; ; modeNum++ ) {
if ( !EnumDisplaySettings( NULL, modeNum, &devmode ) ) {
if ( matched ) {
// we got a resolution match, but not a frequency match
// so disable the frequency requirement
common->Printf( "...^3%dhz is unsupported at %dx%d^0\n", parms.displayHz, parms.width, parms.height );
parms.displayHz = 0;
break;
}
common->Printf( "...^3%dx%d is unsupported in 32 bit^0\n", parms.width, parms.height );
return false;
}
if ( (int)devmode.dmPelsWidth >= parms.width
&& (int)devmode.dmPelsHeight >= parms.height
&& devmode.dmBitsPerPel == 32 ) {
matched = true;
if ( parms.displayHz == 0 || devmode.dmDisplayFrequency == parms.displayHz ) {
break;
}
}
}
memset( &dm, 0, sizeof( dm ) );
dm.dmSize = sizeof( dm );
dm.dmPelsWidth = parms.width;
dm.dmPelsHeight = parms.height;
dm.dmBitsPerPel = 32;
dm.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT | DM_BITSPERPEL;
if ( parms.displayHz != 0 ) {
dm.dmDisplayFrequency = parms.displayHz;
dm.dmFields |= DM_DISPLAYFREQUENCY;
}
common->Printf( "...calling CDS: " );
// try setting the exact mode requested, because some drivers don't report
// the low res modes in EnumDisplaySettings, but still work
if ( ( cdsRet = ChangeDisplaySettings( &dm, CDS_FULLSCREEN ) ) == DISP_CHANGE_SUCCESSFUL ) {
common->Printf( "ok\n" );
win32.cdsFullscreen = true;
return true;
}
//
// the exact mode failed, so scan EnumDisplaySettings for the next largest mode
//
common->Printf( "^3failed^0, " );
PrintCDSError( cdsRet );
common->Printf( "...trying next higher resolution:" );
// we could do a better matching job here...
for ( modeNum = 0 ; ; modeNum++ ) {
if ( !EnumDisplaySettings( NULL, modeNum, &devmode ) ) {
break;
}
if ( (int)devmode.dmPelsWidth >= parms.width
&& (int)devmode.dmPelsHeight >= parms.height
&& devmode.dmBitsPerPel == 32 ) {
if ( ( cdsRet = ChangeDisplaySettings( &devmode, CDS_FULLSCREEN ) ) == DISP_CHANGE_SUCCESSFUL ) {
common->Printf( "ok\n" );
win32.cdsFullscreen = true;
return true;
}
break;
}
}
common->Printf( "\n...^3no high res mode found^0\n" );
return false;
}
/*
===================
GLimp_Init
This is the platform specific OpenGL initialization function. It
is responsible for loading OpenGL, initializing it,
creating a window of the appropriate size, doing
fullscreen manipulations, etc. Its overall responsibility is
to make sure that a functional OpenGL subsystem is operating
when it returns to the ref.
If there is any failure, the renderer will revert back to safe
parameters and try again.
===================
*/
bool GLimp_Init( glimpParms_t parms ) {
const char *driverName;
HDC hDC;
common->Printf( "Initializing OpenGL subsystem\n" );
// check our desktop attributes
hDC = GetDC( GetDesktopWindow() );
win32.desktopBitsPixel = GetDeviceCaps( hDC, BITSPIXEL );
win32.desktopWidth = GetDeviceCaps( hDC, HORZRES );
win32.desktopHeight = GetDeviceCaps( hDC, VERTRES );
ReleaseDC( GetDesktopWindow(), hDC );
// we can't run in a window unless it is 32 bpp
if ( win32.desktopBitsPixel < 32 && !parms.fullScreen ) {
common->Printf("^3Windowed mode requires 32 bit desktop depth^0\n");
return false;
}
// save the hardware gamma so it can be
// restored on exit
GLimp_SaveGamma();
// create our window classes if we haven't already
GLW_CreateWindowClasses();
// this will load the dll and set all our qgl* function pointers,
// but doesn't create a window
// r_glDriver is only intended for using instrumented OpenGL
// dlls. Normal users should never have to use it, and it is
// not archived.
driverName = r_glDriver.GetString()[0] ? r_glDriver.GetString() : "opengl32";
if ( !QGL_Init( driverName ) ) {
common->Printf( "^3GLimp_Init() could not load r_glDriver \"%s\"^0\n", driverName );
return false;
}
// getting the wgl extensions involves creating a fake window to get a context,
// which is pretty disgusting, and seems to mess with the AGP VAR allocation
GLW_GetWGLExtensionsWithFakeWindow();
// try to change to fullscreen
if ( parms.fullScreen ) {
if ( !GLW_SetFullScreen( parms ) ) {
GLimp_Shutdown();
return false;
}
}
// try to create a window with the correct pixel format
// and init the renderer context
if ( !GLW_CreateWindow( parms ) ) {
GLimp_Shutdown();
return false;
}
// wglSwapinterval, etc
GLW_CheckWGLExtensions( win32.hDC );
// check logging
GLimp_EnableLogging( ( r_logFile.GetInteger() != 0 ) );
return true;
}
/*
===================
GLimp_SetScreenParms
Sets up the screen based on the passed parms.
===================
*/
bool GLimp_SetScreenParms( glimpParms_t parms ) {
int exstyle;
int stylebits;
int x, y, w, h;
DEVMODE dm;
memset( &dm, 0, sizeof( dm ) );
dm.dmSize = sizeof( dm );
dm.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT | DM_BITSPERPEL;
if ( parms.displayHz != 0 ) {
dm.dmDisplayFrequency = parms.displayHz;
dm.dmFields |= DM_DISPLAYFREQUENCY;
}
win32.cdsFullscreen = parms.fullScreen;
glConfig.isFullscreen = parms.fullScreen;
if ( parms.fullScreen ) {
exstyle = WS_EX_TOPMOST;
stylebits = WS_POPUP|WS_VISIBLE|WS_SYSMENU;
SetWindowLong( win32.hWnd, GWL_STYLE, stylebits );
SetWindowLong( win32.hWnd, GWL_EXSTYLE, exstyle );
dm.dmPelsWidth = parms.width;
dm.dmPelsHeight = parms.height;
dm.dmBitsPerPel = 32;
x = y = w = h = 0;
} else {
RECT r;
// adjust width and height for window border
r.bottom = parms.height;
r.left = 0;
r.top = 0;
r.right = parms.width;
w = r.right - r.left;
h = r.bottom - r.top;
x = win32.win_xpos.GetInteger();
y = win32.win_ypos.GetInteger();
// adjust window coordinates if necessary
// so that the window is completely on screen
if ( x + w > win32.desktopWidth ) {
x = ( win32.desktopWidth - w );
}
if ( y + h > win32.desktopHeight ) {
y = ( win32.desktopHeight - h );
}
if ( x < 0 ) {
x = 0;
}
if ( y < 0 ) {
y = 0;
}
dm.dmPelsWidth = win32.desktopWidth;
dm.dmPelsHeight = win32.desktopHeight;
dm.dmBitsPerPel = win32.desktopBitsPixel;
exstyle = 0;
stylebits = WINDOW_STYLE|WS_SYSMENU;
AdjustWindowRect (&r, stylebits, FALSE);
SetWindowLong( win32.hWnd, GWL_STYLE, stylebits );
SetWindowLong( win32.hWnd, GWL_EXSTYLE, exstyle );
common->Printf( "%i %i %i %i\n", x, y, w, h );
}
bool ret = ( ChangeDisplaySettings( &dm, parms.fullScreen ? CDS_FULLSCREEN : 0 ) == DISP_CHANGE_SUCCESSFUL );
SetWindowPos( win32.hWnd, parms.fullScreen ? HWND_TOPMOST : HWND_NOTOPMOST, x, y, w, h, parms.fullScreen ? SWP_NOSIZE | SWP_NOMOVE : SWP_SHOWWINDOW );
return ret;
}
/*
===================
GLimp_Shutdown
This routine does all OS specific shutdown procedures for the OpenGL
subsystem.
===================
*/
void GLimp_Shutdown( void ) {
const char *success[] = { "failed", "success" };
int retVal;
common->Printf( "Shutting down OpenGL subsystem\n" );
// set current context to NULL
if ( qwglMakeCurrent ) {
retVal = qwglMakeCurrent( NULL, NULL ) != 0;
common->Printf( "...wglMakeCurrent( NULL, NULL ): %s\n", success[retVal] );
}
// delete HGLRC
if ( win32.hGLRC ) {
retVal = qwglDeleteContext( win32.hGLRC ) != 0;
common->Printf( "...deleting GL context: %s\n", success[retVal] );
win32.hGLRC = NULL;
}
// release DC
if ( win32.hDC ) {
retVal = ReleaseDC( win32.hWnd, win32.hDC ) != 0;
common->Printf( "...releasing DC: %s\n", success[retVal] );
win32.hDC = NULL;
}
// destroy window
if ( win32.hWnd ) {
common->Printf( "...destroying window\n" );
ShowWindow( win32.hWnd, SW_HIDE );
DestroyWindow( win32.hWnd );
win32.hWnd = NULL;
}
// reset display settings
if ( win32.cdsFullscreen ) {
common->Printf( "...resetting display\n" );
ChangeDisplaySettings( 0, 0 );
win32.cdsFullscreen = false;
}
// close the thread so the handle doesn't dangle
if ( win32.renderThreadHandle ) {
common->Printf( "...closing smp thread\n" );
CloseHandle( win32.renderThreadHandle );
win32.renderThreadHandle = NULL;
}
// restore gamma
GLimp_RestoreGamma();
// shutdown QGL subsystem
QGL_Shutdown();
}
/*
=====================
GLimp_SwapBuffers
=====================
*/
void GLimp_SwapBuffers( void ) {
//
// wglSwapInterval is a Windows-private extension,
// so we must check for it here instead of portably
//
if ( r_swapInterval.IsModified() ) {
r_swapInterval.ClearModified();
if ( wglSwapIntervalEXT ) {
wglSwapIntervalEXT( r_swapInterval.GetInteger() );
}
}
qwglSwapBuffers( win32.hDC );
//Sys_DebugPrintf( "*** SwapBuffers() ***\n" );
}
/*
===========================================================
SMP acceleration
===========================================================
*/
//#define REALLOC_DC
/*
===================
GLimp_ActivateContext
===================
*/
void GLimp_ActivateContext( void ) {
if ( !qwglMakeCurrent( win32.hDC, win32.hGLRC ) ) {
win32.wglErrors++;
}
}
/*
===================
GLimp_DeactivateContext
===================
*/
void GLimp_DeactivateContext( void ) {
qglFinish();
if ( !qwglMakeCurrent( win32.hDC, NULL ) ) {
win32.wglErrors++;
}
#ifdef REALLOC_DC
// makeCurrent NULL frees the DC, so get another
if ( ( win32.hDC = GetDC( win32.hWnd ) ) == NULL ) {
win32.wglErrors++;
}
#endif
}
/*
===================
GLimp_RenderThreadWrapper
===================
*/
static void GLimp_RenderThreadWrapper( void ) {
win32.glimpRenderThread();
// unbind the context before we die
qwglMakeCurrent( win32.hDC, NULL );
}
/*
=======================
GLimp_SpawnRenderThread
Returns false if the system only has a single processor
=======================
*/
bool GLimp_SpawnRenderThread( void (*function)( void ) ) {
SYSTEM_INFO info;
// check number of processors
GetSystemInfo( &info );
if ( info.dwNumberOfProcessors < 2 ) {
return false;
}
// create the IPC elements
win32.renderCommandsEvent = CreateEvent( NULL, TRUE, FALSE, NULL );
win32.renderCompletedEvent = CreateEvent( NULL, TRUE, FALSE, NULL );
win32.renderActiveEvent = CreateEvent( NULL, TRUE, FALSE, NULL );
win32.glimpRenderThread = function;
win32.renderThreadHandle = CreateThread(
NULL, // LPSECURITY_ATTRIBUTES lpsa,
0, // DWORD cbStack,
(LPTHREAD_START_ROUTINE)GLimp_RenderThreadWrapper, // LPTHREAD_START_ROUTINE lpStartAddr,
0, // LPVOID lpvThreadParm,
0, // DWORD fdwCreate,
&win32.renderThreadId );
if ( !win32.renderThreadHandle ) {
common->Error( "GLimp_SpawnRenderThread: failed" );
}
SetThreadPriority( win32.renderThreadHandle, THREAD_PRIORITY_ABOVE_NORMAL );
#if 0
// make sure they always run on different processors
SetThreadAffinityMask( GetCurrentThread(), 1 );
SetThreadAffinityMask( win32.renderThreadHandle, 2 );
#endif
return true;
}
//#define DEBUG_PRINTS
/*
===================
GLimp_BackEndSleep
===================
*/
void *GLimp_BackEndSleep( void ) {
void *data;
#ifdef DEBUG_PRINTS
OutputDebugString( "-->GLimp_BackEndSleep\n" );
#endif
ResetEvent( win32.renderActiveEvent );
// after this, the front end can exit GLimp_FrontEndSleep
SetEvent( win32.renderCompletedEvent );
WaitForSingleObject( win32.renderCommandsEvent, INFINITE );
ResetEvent( win32.renderCompletedEvent );
ResetEvent( win32.renderCommandsEvent );
data = win32.smpData;
// after this, the main thread can exit GLimp_WakeRenderer
SetEvent( win32.renderActiveEvent );
#ifdef DEBUG_PRINTS
OutputDebugString( "<--GLimp_BackEndSleep\n" );
#endif
return data;
}
/*
===================
GLimp_FrontEndSleep
===================
*/
void GLimp_FrontEndSleep( void ) {
#ifdef DEBUG_PRINTS
OutputDebugString( "-->GLimp_FrontEndSleep\n" );
#endif
WaitForSingleObject( win32.renderCompletedEvent, INFINITE );
#ifdef DEBUG_PRINTS
OutputDebugString( "<--GLimp_FrontEndSleep\n" );
#endif
}
volatile bool renderThreadActive;
/*
===================
GLimp_WakeBackEnd
===================
*/
void GLimp_WakeBackEnd( void *data ) {
int r;
#ifdef DEBUG_PRINTS
OutputDebugString( "-->GLimp_WakeBackEnd\n" );
#endif
win32.smpData = data;
if ( renderThreadActive ) {
common->FatalError( "GLimp_WakeBackEnd: already active" );
}
r = WaitForSingleObject( win32.renderActiveEvent, 0 );
if ( r == WAIT_OBJECT_0 ) {
common->FatalError( "GLimp_WakeBackEnd: already signaled" );
}
r = WaitForSingleObject( win32.renderCommandsEvent, 0 );
if ( r == WAIT_OBJECT_0 ) {
common->FatalError( "GLimp_WakeBackEnd: commands already signaled" );
}
// after this, the renderer can continue through GLimp_RendererSleep
SetEvent( win32.renderCommandsEvent );
r = WaitForSingleObject( win32.renderActiveEvent, 5000 );
if ( r == WAIT_TIMEOUT ) {
common->FatalError( "GLimp_WakeBackEnd: WAIT_TIMEOUT" );
}
#ifdef DEBUG_PRINTS
OutputDebugString( "<--GLimp_WakeBackEnd\n" );
#endif
}
//===================================================================
/*
===================
GLimp_ExtensionPointer
Returns a function pointer for an OpenGL extension entry point
===================
*/
GLExtension_t GLimp_ExtensionPointer( const char *name ) {
void (*proc)(void);
proc = (GLExtension_t)qwglGetProcAddress( name );
if ( !proc ) {
common->Printf( "Couldn't find proc address for: %s\n", name );
}
return proc;
}
|
Georgia Tech Accepted Students Raise the Bar
Freshman Convocation is the official ceremony to welcome new students to Georgia Tech's community of learners—celebrating enthusiasm for learning and discovery in achieving the university's mission of teaching, research, and service.
The Georgia Institute of Technology has high expectations for the 2012-13 freshman class, because their qualifications for entry have raised the bar.
The average high school grade point average for those accepted to Georgia Tech for the fall semester is 3.9, with an average SAT score of 1430, or 2105 with writing included. That represents an average of roughly 700 points for each section.
On average, admitted students will have taken eight classes that are Advanced Placement, International Baccalaureate or college-level by graduation. Georgia Tech’s admission review process and GPA re-calculation values a student’s choice to take more rigorous courses during his or her high school career.
“This year’s accepted class is truly exceptional,” said Director of Undergraduate Admission Rick Clark. “Each year, the competition for admission to Georgia Tech is increasing and every class raises the quality of our campus.”
According to Clark, this year’s decisions were extremely difficult. “We conduct an extensive, holistic and comparative review process, which means every application is read multiple times. Our admitted class of students not only has remarkable academic achievements and established intellectual curiosity, but also has proven potential to be tomorrow’s entrepreneurs, innovators and leaders.”
The class also continued the trend of being more diverse, with 86 countries and 49 states represented in the admitted class. Georgia Tech accepted more women and Hispanic students this year compared to last year. Clark clarifies, however, that admittance statistics don’t necessarily predict ultimate enrollment numbers.
“We’ll see these numbers fluctuate between admittance and matriculation,” said Clark. “Clearly this group of students has multiple offers of admission and high levels of scholarships to consider as well. It’s our sincere goal, however, to continue the recent trend of augmenting geographic, ethnic and background diversity in our class.”
Approximately 14,700 students applied for acceptance to Georgia Tech, an Institute record and a three percent increase from last year. Each student is vying for one of the 2,400 spots for fall admission or the 250 spots available to start this summer.
“Our summer program gives students the opportunity to experience a world-class research institution in an intimate setting,” said Clark. “Starting out with small class sizes, as well as a chance to really get to know professors, is a huge advantage.”
Early deposits are up by about 10 percent as well. Those accepted to Georgia Tech have until May 1 to make their deposits and enroll in classes during their freshman orientation. |
/**
* @author Javad Alimohammadi
*/
public class RedUtilsLockImpl implements RedUtilsLock {
private static final Logger log = LoggerFactory.getLogger(RedUtilsLockImpl.class);
private final ExecutorService operationExecutorService = Executors.newCachedThreadPool();
private final LockChannel lockChannel;
private final ReplicaManager replicaManager;
private final RedUtilsConfig redUtilsConfig;
private final JedisConnectionManager connectionManager;
/**
* Start with default Redis configuration, host:127.0.0.1 and port:6379
*/
public RedUtilsLockImpl() {
this(RedUtilsConfig.DEFAULT_HOST_ADDRESS, RedUtilsConfig.DEFAULT_PORT, 0);
}
public RedUtilsLockImpl(final String hostAddress, final int port) {
this(hostAddress, port, 0);
}
/**
* Use with master-replica configuration
*
* @param hostAddress server address of Redis
* @param port port number of Redis
* @param replicaCount number of replicas
*/
public RedUtilsLockImpl(final String hostAddress, final int port, final int replicaCount) {
this(new RedUtilsConfig
.RedUtilsConfigBuilder()
.hostAddress(hostAddress)
.port(port)
.replicaCount(replicaCount)
.build()
);
}
/**
* To have more control over the various configuration parameters, use this constructor.
* @param redUtilsConfig the various configuration parameters that can be set
*/
public RedUtilsLockImpl(RedUtilsConfig redUtilsConfig) {
this.redUtilsConfig = redUtilsConfig;
this.connectionManager = new JedisConnectionManager(redUtilsConfig);
this.lockChannel = new JedisLockChannel(connectionManager, redUtilsConfig.getUnlockedMessagePattern());
this.replicaManager = new JedisReplicaManager(connectionManager, redUtilsConfig.getReplicaCount(),
redUtilsConfig.getRetryCountForSyncingWithReplicas(), redUtilsConfig.getWaitingTimeForReplicasMillis());
}
@Override
public boolean tryAcquire(final String lockName, final OperationCallBack operationCallBack) {
if (!connectionManager.reserveOne()) {
throw new InsufficientResourceException("There isn't any available connection; please try again or change the connection configs");
}
boolean getLockSuccessfully = getLock(lockName, redUtilsConfig.getLeaseTimeMillis());
LockRefresher lockRefresher = null;
if (getLockSuccessfully) {
try {
lockRefresher = new JedisLockRefresher(redUtilsConfig, replicaManager, connectionManager);
CompletableFuture<Void> lockRefresherFuture = lockRefresher.start(lockName);
CompletableFuture<Void> mainOperationFuture = CompletableFuture.runAsync(operationCallBack::doOperation,
operationExecutorService);
lockRefresherFuture.exceptionally(throwable -> {
mainOperationFuture.completeExceptionally(throwable);
return null;
});
mainOperationFuture.join();
} finally {
if (lockRefresher != null) {
    lockRefresher.tryStop(lockName);
}
tryReleaseLock(lockName);
tryNotifyOtherClients(lockName);
connectionManager.free();
}
return true;
}
return false;
}
@Override
public void acquire(final String lockName, final OperationCallBack operationCallBack) {
if (!connectionManager.reserve(2)) {
throw new InsufficientResourceException("There isn't any available connection; please try again or change the connection configs");
}
boolean getLockSuccessfully = getLock(lockName, redUtilsConfig.getLeaseTimeMillis());
if (!getLockSuccessfully) {
try {
lockChannel.subscribe(lockName);
while (!getLockSuccessfully) {
final long ttl = getTTL(lockName);
if (ttl > 0) {
lockChannel.waitForNotification(lockName, ttl);
} else {
getLockSuccessfully = getLock(lockName, redUtilsConfig.getLeaseTimeMillis());
}
}
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
throw new IllegalStateException("Interrupted");
} finally {
lockChannel.unSubscribe(lockName);
}
}
// At this point we have the lock
LockRefresher lockRefresher = new JedisLockRefresher(redUtilsConfig, replicaManager, connectionManager);
try {
CompletableFuture<Void> lockRefresherStatus = lockRefresher.start(lockName);
CompletableFuture<Void> mainOperationFuture = CompletableFuture.runAsync(operationCallBack::doOperation,
operationExecutorService);
lockRefresherStatus.exceptionally(throwable -> {
mainOperationFuture.completeExceptionally(throwable);
return null;
});
mainOperationFuture.join();
} finally {
lockRefresher.tryStop(lockName);
tryReleaseLock(lockName);
tryNotifyOtherClients(lockName);
connectionManager.free();
}
}
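/*
 * Minimal usage sketch (not part of the original source; the lock name and callback body
 * are hypothetical). Assuming OperationCallBack is a single-method callback, a caller
 * guards a critical section like this:
 *
 *   RedUtilsLock lock = new RedUtilsLockImpl("127.0.0.1", 6379);
 *   boolean executed = lock.tryAcquire("inventory-sync", () -> {
 *       // critical section; the lock is refreshed in the background until this returns
 *   });
 *   // executed == false means another client currently holds "inventory-sync"
 */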
private boolean getLock(final String lockName, final long expirationTimeMillis) {
final String lockValue = ThreadManager.getName();
try {
Object response = connectionManager.doWithConnection(jedis -> {
return jedis.eval(LuaScript.GET_LOCK, 1, lockName, lockValue, String.valueOf(expirationTimeMillis));
});
if (RedisResponse.isFailed(response)) {
return false;
}
replicaManager.waitForResponse();
return true;
} catch (Exception exception) {
releaseLock(lockName);
throw exception;
}
}
private void releaseLock(String lockName) {
String lockValue = ThreadManager.getName();
connectionManager.doWithConnection(jedis -> {
return jedis.eval(LuaScript.RELEASE_LOCK, 1, lockName, lockValue);
});
}
private void tryReleaseLock(String lockName) {
try {
releaseLock(lockName);
} catch (Exception ex) {
log.debug("Could not release lock [{}]", lockName, ex);
}
}
private long getTTL(final String lockName) {
return connectionManager.doWithConnection(jedis -> jedis.pttl(lockName));
}
public void tryNotifyOtherClients(final String lockName) {
try {
connectionManager.doWithConnection(jedis -> {
return jedis.publish(lockName, redUtilsConfig.getUnlockedMessagePattern());
});
} catch (Exception exception) {
// nothing
log.debug("Error in notify [{}] to other clients", lockName, exception);
}
}
} |
module Grains
( square
, total
) where
square :: Integer -> Maybe Integer
square n
| n `notElem` [1 .. 64] = Nothing
| otherwise = Just $ 2 ^ (n - 1)
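-- The whole-board total is the geometric series 2^0 + 2^1 + ... + 2^63 = 2^64 - 1,
-- which is what geometricSum 2 64 below evaluates to.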
total :: Integer
total = geometricSum 2 64
where
geometricSum q n = q ^ n - 1 |
Inhibition of the PI 3‐kinase pathway disrupts the unfolded protein response and reduces sensitivity to ER stress‐dependent apoptosis
Class Ia phosphoinositide 3‐kinases (PI3K) are critical mediators of insulin and growth factor action. We have demonstrated that the p85α regulatory subunit of PI3K modulates the unfolded protein response (UPR) by interacting with and regulating the nuclear translocation of XBP‐1s, a transcription factor essential for the UPR. We now show that PI3K activity is required for full activation of the UPR. Pharmacological inhibition of PI3K in cells blunts the ER stress‐dependent phosphorylation of IRE1α and PERK, decreases induction of ATF4, CHOP, and XBP‐1 and upregulates UPR target genes. Cells expressing a human p85α mutant (R649W) previously shown to inhibit PI3K, exhibit decreased activation of IRE1α and PERK and reduced induction of CHOP and ATF4. Pharmacological inhibition of PI3K, overexpression of a mutant of p85α that lacks the ability to interact with the p110α catalytic subunit (∆p85α) or expression of mutant p85α (R649W) in vivo, decreased UPR‐dependent induction of ER stress response genes. Acute tunicamycin treatment of R649W+/− mice revealed reduced induction of UPR target genes in adipose tissue, whereas chronic tunicamycin exposure caused sustained increases in UPR target genes in adipose tissue. Finally, R649W+/− cells exhibited a dramatic resistance to ER stress‐dependent apoptosis. These data suggest that PI3K pathway dysfunction causes ER stress that may drive the pathogenesis of several diseases including Type 2 diabetes and various cancers. |
import * as dataTypes from '../dataTypes';
import { schema, t } from '../schema';
/**
* The `<relation>` element
*
* Parent element: `<identification>`
*
* The `<relation>` element describes a related resource for the music that is encoded. This is similar to the [Dublin
* Core relation element](https://www.dublincore.org/specifications/dublin-core/dcmi-terms/elements11/relation/).
*
* {@link https://www.w3.org/2021/06/musicxml40/musicxml-reference/elements/relation/}
*/
export const Relation = schema(
'relation',
{
/**
* Standard type values are music, words, and arrangement, but other types may be used.
*/
type: t.optional(dataTypes.token()),
},
[t.required(dataTypes.string())] as const
);
|
package sql
import (
"context"
"database/sql/driver"
"io"
"github.com/pkg/errors"
)
// ResultSet is SQL resultSet struct
type ResultSet struct {
Last bool
Data [][]driver.Value
Index int
}
// Column is SQL column struct
type Column struct {
Name string
ServerType string
}
// Rows is SQL rows struct
type Rows struct {
Connection Conn
QueryID string
ColumnsRaw []Column
ResultSet *ResultSet
}
// GetResultAndMoveNext returns the result at the current index and increases the index by 1.
// Note: GetResultAndMoveNext does not check for index out of range
func (rs *ResultSet) GetResultAndMoveNext() []driver.Value {
r := rs.Data[rs.Index]
rs.Index++
return r
}
// Columns - see https://golang.org/pkg/database/sql/driver/#Rows for more details
func (r *Rows) Columns() []string {
cl := len(r.ColumnsRaw)
cs := make([]string, cl, cl)
for i, v := range r.ColumnsRaw {
cs[i] = v.Name
}
return cs
}
// ColumnTypeDatabaseTypeName - see https://golang.org/pkg/database/sql/driver/#RowsColumnTypeDatabaseTypeName for more details
func (r *Rows) ColumnTypeDatabaseTypeName(index int) string {
if 0 > index || index >= len(r.ColumnsRaw) {
return ""
}
return r.ColumnsRaw[index].ServerType
}
// Close - see https://golang.org/pkg/database/sql/driver/#Rows for more details
func (r *Rows) Close() error {
defer func() {
r.Connection = nil
r.QueryID = ""
}()
if len(r.QueryID) > 0 && r.Connection != nil {
return r.Connection.CloseQueryContext(context.Background(), r.QueryID)
}
return nil
}
// Next - see https://golang.org/pkg/database/sql/driver/#Rows for more details
func (r *Rows) Next(dest []driver.Value) error {
if r.Connection == nil {
return errors.New("Rows are closed")
}
if r.ResultSet == nil {
return io.EOF
}
size := len(r.ResultSet.Data)
if r.ResultSet.Index >= size {
if r.ResultSet.Last {
return io.EOF
}
var err error
r.ResultSet, err = r.Connection.FetchContext(context.Background(), r.QueryID, r.ColumnsRaw)
if err != nil {
return errors.Wrap(err, "Failed to get next page for the query")
}
if len(r.ResultSet.Data) == 0 {
return io.EOF
}
}
row := r.ResultSet.GetResultAndMoveNext()
for i, v := range row {
dest[i] = v
}
return nil
}
|
async def readblock(self):
sep = END_BLOCK + CARRIAGE_RETURN
seplen = len(sep)
try:
block = await self.readuntil(sep)
except LimitOverrunError as loe:
if self._buffer.startswith(sep, loe.consumed):
del self._buffer[: loe.consumed + seplen]
else:
self._buffer.clear()
self._maybe_resume_transport()
raise ValueError(loe.args[0])
if not block or block[0:1] != START_BLOCK:
raise InvalidBlockError(
"Block does not begin with Start Block character <VT>"
)
return block[1:-2] |
mod foo;
mod bar;
fn main() {
// bar::hello();
println!("hello");
}
|
/**
* Testing system exit
* http://stackoverflow.com/questions/309396/java-how-to-test-methods-that-call-system-exit
*
* Created by Stan Reshetnyk on 17.09.16.
*/
public class GenesisLoadTest {
@Test
public void shouldLoadGenesis_whenShortWay() {
loadGenesis(null, "frontier-test.json");
assertTrue(true);
}
@Test
public void shouldLoadGenesis_whenFullPathSpecified() throws URISyntaxException {
URL url = GenesisLoadTest.class.getClassLoader().getResource("genesis/frontier-test.json");
// full path
System.out.println("url.getPath() " + url.getPath());
loadGenesis(url.getPath(), null);
Path path = new File(url.toURI()).toPath();
Path curPath = new File("").getAbsoluteFile().toPath();
String relPath = curPath.relativize(path).toFile().getPath();
System.out.println("Relative path: " + relPath);
loadGenesis(relPath, null);
assertTrue(true);
}
@Test
public void shouldLoadGenesisFromFile_whenBothSpecified() {
URL url = GenesisLoadTest.class.getClassLoader().getResource("genesis/frontier-test.json");
// full path
System.out.println("url.getPath() " + url.getPath());
loadGenesis(url.getPath(), "NOT_EXIST");
assertTrue(true);
}
@Test(expected = RuntimeException.class)
public void shouldError_whenWrongPath() {
loadGenesis("NON_EXISTED_PATH", null);
assertTrue(false);
}
private void loadGenesis(String genesisFile, String genesisResource) {
Config config = ConfigFactory.empty();
if (genesisResource != null) {
config = config.withValue("genesis",
ConfigValueFactory.fromAnyRef(genesisResource));
}
if (genesisFile != null) {
config = config.withValue("genesisFile",
ConfigValueFactory.fromAnyRef(genesisFile));
}
new SystemProperties(config).getGenesis();
}
} |
// +build darwin
// +build cgo
package disk
/*
#cgo LDFLAGS: -framework CoreFoundation -framework IOKit
#include <stdint.h>
#include <CoreFoundation/CoreFoundation.h>
#include "iostat_darwin.h"
*/
import "C"
import (
"context"
"github.com/shirou/gopsutil/internal/common"
)
func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) {
var buf [C.NDRIVE]C.DriveStats
n, err := C.readdrivestat(&buf[0], C.int(len(buf)))
if err != nil {
return nil, err
}
ret := make(map[string]IOCountersStat, 0)
for i := 0; i < int(n); i++ {
d := IOCountersStat{
ReadBytes: uint64(buf[i].read),
WriteBytes: uint64(buf[i].written),
ReadCount: uint64(buf[i].nread),
WriteCount: uint64(buf[i].nwrite),
ReadTime: uint64(buf[i].readtime / 1000 / 1000), // note: read/write time are in ns, but we want ms.
WriteTime: uint64(buf[i].writetime / 1000 / 1000),
IoTime: uint64((buf[i].readtime + buf[i].writetime) / 1000 / 1000),
Name: C.GoString(&buf[i].name[0]),
}
if len(names) > 0 && !common.StringsHas(names, d.Name) {
continue
}
ret[d.Name] = d
}
return ret, nil
}
|
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgllife.model.model_zoo import GCNPredictor
class Predictor(nn.Module):
def __init__(self, in_feats, hidden_feats=[32, 32], batchnorm=[True, True], dropout=[0, 0], predictor_hidden_feats=32, n_tasks=64):
super(Predictor, self).__init__()
self.net1 = GCNPredictor(in_feats=in_feats,
hidden_feats=hidden_feats,
batchnorm=batchnorm,
dropout=dropout,
predictor_hidden_feats=predictor_hidden_feats,
n_tasks=n_tasks)
self.net2 = GCNPredictor(in_feats=in_feats,
hidden_feats=hidden_feats,
batchnorm=batchnorm,
dropout=dropout,
predictor_hidden_feats=predictor_hidden_feats,
n_tasks=n_tasks)
self.predict = nn.Sequential(nn.Linear(n_tasks*2, 64),
nn.ReLU(),
nn.Linear(64, 1))
def forward(self, bg, feats):
wt = self.net1(bg[0], feats[0])
mut = self.net2(bg[1], feats[1])
#print(wt)
#print(mut)
#print(mut.shape)
flattened = torch.cat([wt, mut], 1)
#print(flattened.shape)
return self.predict(flattened)
|
/**
* Apache Kafka implementation of Log.
*
* @since 9.3
*/
public class KafkaLogAppender<M extends Externalizable> implements CloseableLogAppender<M> {
private static final Log log = LogFactory.getLog(KafkaLogAppender.class);
protected final String topic;
protected final Properties consumerProps;
protected final Properties producerProps;
protected final int size;
// keep track of created tailers to make sure they are closed
protected final ConcurrentLinkedQueue<KafkaLogTailer<M>> tailers = new ConcurrentLinkedQueue<>();
protected final String name;
protected final KafkaNamespace ns;
protected final Codec<M> codec;
protected final Codec<M> encodingCodec;
protected KafkaProducer<String, Bytes> producer;
protected boolean closed;
protected static final AtomicInteger PRODUCER_CLIENT_ID_SEQUENCE = new AtomicInteger(1);
private KafkaLogAppender(Codec<M> codec, KafkaNamespace ns, String name, Properties producerProperties,
Properties consumerProperties) {
Objects.requireNonNull(codec);
this.codec = codec;
if (NO_CODEC.equals(codec)) {
this.encodingCodec = new SerializableCodec<>();
} else {
this.encodingCodec = codec;
}
this.ns = ns;
this.topic = ns.getTopicName(name);
this.name = name;
this.producerProps = producerProperties;
this.consumerProps = consumerProperties;
producerProps.setProperty(ProducerConfig.CLIENT_ID_CONFIG,
name + "-" + PRODUCER_CLIENT_ID_SEQUENCE.getAndIncrement());
this.producer = new KafkaProducer<>(this.producerProps);
this.size = producer.partitionsFor(topic).size();
if (log.isDebugEnabled()) {
log.debug(String.format("Created appender: %s on topic: %s with %d partitions", name, topic, size));
}
}
public static <M extends Externalizable> KafkaLogAppender<M> open(Codec<M> codec, KafkaNamespace ns, String name,
Properties producerProperties, Properties consumerProperties) {
return new KafkaLogAppender<>(codec, ns, name, producerProperties, consumerProperties);
}
@Override
public String name() {
return name;
}
public String getTopic() {
return topic;
}
@Override
public int size() {
return size;
}
@Override
public LogOffset append(String key, M message) {
Objects.requireNonNull(key);
int partition = (key.hashCode() & 0x7fffffff) % size;
return append(partition, key, message);
}
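/*
 * Usage sketch (illustrative, not part of the original code): appends that use the same key
 * hash to the same partition, so per-key ordering is preserved. The key and messages below
 * are hypothetical placeholders.
 *
 *   LogOffset first = appender.append("order-42", message1);
 *   LogOffset second = appender.append("order-42", message2);
 *   // both land on the same partition because the key is identical
 */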
@Override
public LogOffset append(int partition, M message) {
String key = String.valueOf(partition);
return append(partition, key, message);
}
public LogOffset append(int partition, String key, M message) {
Bytes value = Bytes.wrap(encodingCodec.encode(message));
ProducerRecord<String, Bytes> record = new ProducerRecord<>(topic, partition, key, value);
Future<RecordMetadata> future = producer.send(record);
RecordMetadata result;
try {
result = future.get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new StreamRuntimeException("Unable to send record: " + record, e);
} catch (ExecutionException e) {
throw new StreamRuntimeException("Unable to send record: " + record, e);
}
LogOffset ret = new LogOffsetImpl(name, partition, result.offset());
if (log.isDebugEnabled()) {
int len = record.value().get().length;
log.debug(String.format("Append to %s-%02d:+%d, len: %d, key: %s, value: %s", name, partition, ret.offset(),
len, key, message));
}
return ret;
}
@Override
public boolean waitFor(LogOffset offset, String group, Duration timeout) throws InterruptedException {
boolean ret = false;
if (!name.equals(offset.partition().name())) {
throw new IllegalArgumentException(name + " can not wait for an offset with a different Log: " + offset);
}
TopicPartition topicPartition = new TopicPartition(topic, offset.partition().partition());
try {
ret = isProcessed(group, topicPartition, offset.offset());
if (ret) {
return true;
}
long timeoutMs = timeout.toMillis();
long deadline = System.currentTimeMillis() + timeoutMs;
long delay = Math.min(100, timeoutMs);
while (!ret && System.currentTimeMillis() < deadline) {
Thread.sleep(delay);
ret = isProcessed(group, topicPartition, offset.offset());
}
return ret;
} finally {
if (log.isDebugEnabled()) {
log.debug("waitFor " + offset + "/" + group + " returns: " + ret);
}
}
}
@Override
public boolean closed() {
return closed;
}
@Override
public String toString() {
return "KafkaLogAppender{" + "name='" + name + '\'' + ", size=" + size + ", ns=" + ns + ", closed=" + closed
+ ", codec=" + codec + '}';
}
@Override
public Codec<M> getCodec() {
return codec;
}
protected boolean isProcessed(String group, TopicPartition topicPartition, long offset) {
// TODO: find a better way; creating a consumer each time is expensive,
// but it is needed because an already open consumer is not properly updated
Properties props = (Properties) consumerProps.clone();
props.put(ConsumerConfig.GROUP_ID_CONFIG, ns.getKafkaGroup(group));
try (KafkaConsumer<String, Bytes> consumer = new KafkaConsumer<>(props)) {
consumer.assign(Collections.singletonList(topicPartition));
long last = consumer.position(topicPartition);
boolean ret = last > 0 && last > offset;
if (log.isDebugEnabled()) {
log.debug("isProcessed " + topicPartition.topic() + ":" + topicPartition.partition() + "/" + group
+ ":+" + offset + "? " + ret + ", current position: " + last);
}
return ret;
}
}
@Override
public void close() {
log.debug("Closing appender: " + name);
tailers.stream().filter(Objects::nonNull).forEach(tailer -> {
try {
tailer.close();
} catch (Exception e) {
log.error("Failed to close tailer: " + tailer);
}
});
tailers.clear();
if (producer != null) {
producer.close();
producer = null;
}
closed = true;
}
} |
/**
* Constants used across the file sharer.
*/
public class Constants {
public static final int BOOTSTRAP_SERVER_PORT = 55555;
public static final int TASK_INTERVAL = 3000;
public static final int THREAD_DISABLE_TIMEOUT = 1000;
public static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;
public static final String LOCALHOST = "127.0.0.1";
public static final int RMI_REGISTRY_PORT = 33333;
public static final String RMI_REGISTRY_ENTRY_NETWORK_HANDLER_POSTFIX = "_network_handler";
public static final String RMI_REGISTRY_ENTRY_TRACER = "tracer";
public static final String RMI_REGISTRY_ENTRY_TRACEABLE_POSTFIX = "_traceable";
public static final String RMI_HOME_SYSTEM_PROPERTY = "java.rmi.server.hostname";
private Constants() { // Prevents instantiation
}
} |
// client/src/@bug-ui/Form/index.tsx
export * from './Input';
export * from './StyledH3Input'; |
/* Return -1 if exception,
   FALSE if the property does not exist, TRUE if it exists. If TRUE is
   returned, the property descriptor 'desc' is filled if it is not NULL. */
int JS_GetOwnPropertyInternal(JSContext *ctx, JSPropertyDescriptor *desc,
JSObject *p, JSAtom prop)
{
JSShapeProperty *prs;
JSProperty *pr;
retry:
prs = find_own_property(&pr, p, prop);
if (prs) {
if (desc) {
desc->flags = prs->flags & JS_PROP_C_W_E;
desc->getter = JS_UNDEFINED;
desc->setter = JS_UNDEFINED;
desc->value = JS_UNDEFINED;
if (UNLIKELY(prs->flags & JS_PROP_TMASK)) {
if ((prs->flags & JS_PROP_TMASK) == JS_PROP_GETSET) {
desc->flags |= JS_PROP_GETSET;
if (pr->u.getset.getter)
desc->getter = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.getter));
if (pr->u.getset.setter)
desc->setter = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.setter));
} else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF) {
JSValue val = *pr->u.var_ref->pvalue;
if (UNLIKELY(JS_IsUninitialized(val))) {
JS_ThrowReferenceErrorUninitialized(ctx, prs->atom);
return -1;
}
desc->value = JS_DupValue(ctx, val);
} else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) {
if (JS_AutoInitProperty(ctx, p, prop, pr, prs))
return -1;
goto retry;
}
} else {
desc->value = JS_DupValue(ctx, pr->u.value);
}
} else {
if (UNLIKELY((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF)) {
if (UNLIKELY(JS_IsUninitialized(*pr->u.var_ref->pvalue))) {
JS_ThrowReferenceErrorUninitialized(ctx, prs->atom);
return -1;
}
} else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) {
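                /* JS_PROP_AUTOINIT with desc == NULL: nothing to do, the caller only asked
                   whether the property exists, so it can stay lazily initialized */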
}
}
return TRUE;
}
if (p->is_exotic) {
if (p->fast_array) {
if (__JS_AtomIsTaggedInt(prop)) {
uint32_t idx;
idx = __JS_AtomToUInt32(prop);
if (idx < p->u.array.count) {
if (desc) {
desc->flags = JS_PROP_WRITABLE | JS_PROP_ENUMERABLE |
JS_PROP_CONFIGURABLE;
desc->getter = JS_UNDEFINED;
desc->setter = JS_UNDEFINED;
desc->value = JS_GetPropertyUint32(ctx, JS_MKPTR(JS_TAG_OBJECT, p), idx);
}
return TRUE;
}
}
} else {
const JSClassExoticMethods *em = ctx->rt->class_array[p->class_id].exotic;
if (em && em->get_own_property) {
return em->get_own_property(ctx, desc,
JS_MKPTR(JS_TAG_OBJECT, p), prop);
}
}
}
return FALSE;
} |
// TestClient_ArchiveWorkspaceTemplate tests archiving a workspace template
func TestClient_ArchiveWorkspaceTemplate(t *testing.T) {
testClientArchiveWorkspaceTemplateSuccess(t)
testClientArchiveWorkspaceTemplateNotFound(t)
} |
/*
* Copyright 2021 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.picimako.gherkin.toolwindow;
import static org.assertj.core.api.Assertions.assertThat;
import com.intellij.icons.AllIcons;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.testFramework.fixtures.BasePlatformTestCase;
import com.intellij.util.PlatformIcons;
import com.picimako.gherkin.toolwindow.nodetype.Category;
import com.picimako.gherkin.toolwindow.nodetype.ContentRoot;
import com.picimako.gherkin.toolwindow.nodetype.FeatureFile;
import com.picimako.gherkin.toolwindow.nodetype.ModelDataRoot;
import com.picimako.gherkin.toolwindow.nodetype.Tag;
import icons.CucumberIcons;
/**
* Unit test for {@link GherkinTagTree.GherkinTagsNodeRenderer}.
*/
public class GherkinTagsNodeRendererTest extends BasePlatformTestCase {
private GherkinTagTree tree;
private GherkinTagTree.GherkinTagsNodeRenderer renderer;
@Override
protected String getTestDataPath() {
return "testdata/features";
}
@Override
protected void setUp() throws Exception {
super.setUp();
tree = new GherkinTagTree(new ProjectSpecificGherkinTagTreeModel(getProject()));
renderer = new GherkinTagTree.GherkinTagsNodeRenderer();
}
public void testRenderCellForModelDataRoot() {
renderer.customizeCellRenderer(tree, new ModelDataRoot(getProject()), true, true, false, 0, false);
assertThat(renderer.getIcon()).isEqualTo(PlatformIcons.FOLDER_ICON);
assertThat(renderer.getCharSequence(true)).isEqualTo("Gherkin Tags");
}
public void testRenderCellForModule() {
renderer.customizeCellRenderer(tree, ContentRoot.createModule("module name", getProject()), true, true, false, 0, false);
assertThat(renderer.getIcon()).isEqualTo(AllIcons.Actions.ModuleDirectory);
assertThat(renderer.getCharSequence(true)).isEqualTo("module name");
}
public void testRenderCellForCategory() {
renderer.customizeCellRenderer(tree, new Category("Test Suite", getProject()), true, true, false, 0, false);
assertThat(renderer.getIcon()).isEqualTo(PlatformIcons.LIBRARY_ICON);
assertThat(renderer.getCharSequence(true)).isEqualTo("Test Suite");
}
public void testRenderCellForTag() {
TagOccurrencesRegistry.getInstance(getProject()).init(1);
VirtualFile theGherkin = myFixture.configureByFile("the_gherkin.feature").getVirtualFile();
renderer.customizeCellRenderer(tree, new Tag("regression", theGherkin, getProject()), true, true, false, 0, false);
assertThat(renderer.getIcon()).isEqualTo(AllIcons.Gutter.ExtAnnotation);
assertThat(renderer.getCharSequence(true)).isEqualTo("regression");
}
public void testRenderCellForGherkinFile() {
TagOccurrencesRegistry.getInstance(getProject()).init(1);
VirtualFile theGherkin = myFixture.configureByFile("the_gherkin.feature").getVirtualFile();
FeatureFile featureFile = new FeatureFile(theGherkin, "parent", getProject());
renderer.customizeCellRenderer(tree, featureFile, true, true, true, 0, false);
assertThat(renderer.getIcon()).isEqualTo(CucumberIcons.Cucumber);
assertThat(renderer.getCharSequence(true)).isEqualTo("the_gherkin.feature");
}
}
|
#ifndef ALGINE_BONEMATRICES_H
#define ALGINE_BONEMATRICES_H
#include <algine/std/animation/BoneMatrix.h>
#include <vector>
namespace algine {
using BoneMatrices = std::vector<BoneMatrix>;
}
#endif //ALGINE_BONEMATRICES_H
|
/**
* Asserts that {@link Assembly#isSuffixSymbolName(String)} returns the expected result for various inputs.
*/
@Test
public void isSuffixSymbolName() {
assertThat(Assembly.isSuffixSymbolName(""), is(false));
assertThat(Assembly.isSuffixSymbolName("A"), is(false));
assertThat(Assembly.isSuffixSymbolName("ABC"), is(false));
assertThat(Assembly.isSuffixSymbolName("."), is(true));
assertThat(Assembly.isSuffixSymbolName(".A"), is(true));
assertThat(Assembly.isSuffixSymbolName(".ABC"), is(true));
} |
/**
* This class contains hooks used in the {@link net.minecraft.world.item.crafting.RecipeManager RecipeManager} bytecode.
*
* @author Jonathing
* @see net.minecraft.world.item.crafting.RecipeManager#apply(Map, ResourceManager, ProfilerFiller)
* RecipeManager.apply(Map, ResourceManager, ProfilerFiller)
* @since 2.2.2
*/
public final class RecipeManagerHooks
{
private static final Marker MARKER = MarkerManager.getMarker("RecipeManagerCoreMod");
private RecipeManagerHooks()
{
}
/**
* This modifies a given {@link Map} of {@link ResourceLocation}s and {@link JsonElement}s containing all of the
* recipes to be parsed on world load. It uses the {@link net.minecraftforge.fml.ModList ModList} to check if a
* particular mod is <em>not</em> loaded. This way, I can disable recipes specific for that mod so the console
* doesn't shit itself when it tries to parse through that recipe.
*
* @param recipeMap The map of recipes to be loaded on world load.
*/
public static void removeOptionalRecipes(Map<ResourceLocation, JsonElement> recipeMap)
{
ForageCraftData.OPTIONAL_RECIPES.forEach((modId, recipes) ->
{
if (!ModList.get().isLoaded(modId))
{
int size = recipes.size();
LOGGER.debug(MARKER, "Skipping {} recipe{} since {} is not installed.", size, size == 1 ? "" : "s", modId);
ForageCraftData.OPTIONAL_RECIPES.get(modId).forEach(recipeMap::remove);
}
});
}
} |
/**
* Find the marker that has the 'LayoutFields.CUSTOM_INFO' name in a marker tree.
*
* @param marker the root marker of the tree, or subtree
* @param visited a collection of already visited markers, used to avoid an infinite loop
* @return the marker if found, otherwise `null`
*/
public static Marker findCustomFieldsMarker(Marker marker, Set<Marker> visited) {
if (marker == null || visited.contains(marker)) {
return null;
} else if (LayoutFields.CUSTOM_INFO.equals(marker.getName())) {
return marker;
} else {
visited.add(marker);
Iterator<Marker> children = marker.iterator();
while (children.hasNext()) {
Marker foundMarker = findCustomFieldsMarker(children.next(), visited);
if (foundMarker != null)
return foundMarker;
}
}
return null;
} |
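/*
 * Hedged usage sketch (not in the original source). Assuming Marker is the SLF4J marker type
 * used above, a caller would start the search from a logging event's marker with a fresh
 * visited set; the event variable is a hypothetical placeholder:
 *
 *   Marker custom = findCustomFieldsMarker(event.getMarker(), new HashSet<>());
 *   if (custom != null) {
 *       // the marker tree contains the LayoutFields.CUSTOM_INFO marker
 *   }
 */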
{-# LANGUAGE OverloadedStrings #-}
module Parse.Lexer where
import Data.Char
import Data.List.NonEmpty (NonEmpty (..))
import qualified Data.List.NonEmpty as NE
import Data.Loc
import Data.Maybe (fromMaybe)
import Data.Text (Text)
import qualified Data.Text as Text
import Language.Lexer.Applicative hiding (LexicalError)
import qualified Language.Lexer.Applicative as Lexer
import Parse.TokenStream (PrettyToken (..))
import Text.Regex.Applicative
data Tok
= TokNewline
| TokWhiteSpace
| TokEOF
| TokParenOpen
| TokParenClose
| TokUpperIdent Text
| TokLowerIdent Text
| TokInt Int
| TokAssign
| TokSemicolon
| TokLeftArrow
| TokDocLineComment Text
| TokEq
deriving (Eq, Ord)
instance Show Tok where
show tok = case tok of
TokNewline -> "\n"
TokWhiteSpace -> " "
TokEOF -> ""
TokParenOpen -> "("
TokParenClose -> ")"
TokUpperIdent s -> show s
TokLowerIdent s -> show s
TokInt i -> show i
TokAssign -> "define"
TokSemicolon -> ":"
TokLeftArrow -> "->"
TokDocLineComment s -> "--|" <> show s
TokEq -> "="
text :: Text -> RE Text Text
text rawText = Text.foldr f (pure "") rawText
where
f :: Char -> RE Text Text -> RE Text Text
f c p = Text.cons <$ sym (Text.singleton c) <*> pure c <*> p
-- | Regex rules
tokRE :: RE Text Tok
tokRE =
TokNewline <$ text "\n"
<|> TokParenOpen <$ text "("
<|> TokParenClose <$ text ")"
<|> TokAssign <$ text "define"
<|> TokLowerIdent <$> lowercaseIdentifierRE
<|> TokUpperIdent <$> uppercaseIdentifierRE
<|> TokInt <$> intRE
<|> TokSemicolon <$ text ":"
<|> TokLeftArrow <$ text "->"
<|> TokEq <$ text "="
check :: (Char -> Bool) -> Text -> Bool
check f xs
| Text.null xs = False
| otherwise = f (Text.head xs)
-- starts with `'` character
atomRE :: RE Text Text
atomRE =
Text.append <$> psym (check (== '\''))
<*> (Text.concat <$> many (psym (check (\c -> isAlphaNum c || c == '_'))))
identifierRE :: RE Text Text
identifierRE =
Text.append <$> psym (check isAlpha)
<*> (Text.concat <$> many (psym (check (\c -> isAlphaNum c || c == '_'))))
uppercaseIdentifierRE :: RE Text Text
uppercaseIdentifierRE =
Text.append <$> psym (check isUpper) -- it actually checks the Unicode!
<*> (Text.concat <$> many (psym (check (\c -> isAlphaNum c || c == '_'))))
lowercaseIdentifierRE :: RE Text Text
lowercaseIdentifierRE =
Text.append <$> psym (check isLower) -- it actually checks the Unicode!
<*> (Text.concat <$> many (psym (check (\c -> isAlphaNum c || c == '_'))))
intRE :: RE Text Int
intRE = read <$> (Text.unpack . Text.concat <$> (some . psym . check $ isDigit))
contra :: RE Text a -> RE Char a
contra = comap Text.singleton
whitespaceButNewlineRE :: RE Text Tok
whitespaceButNewlineRE =
matchWhen
(check (\c -> isSpace c && c /= '\n' && c /= '\r'))
TokWhiteSpace
where
matchWhen :: (Text -> Bool) -> Tok -> RE Text Tok
matchWhen p symbol = msym (\t -> if p t then Just symbol else Nothing)
docLineCommentRE :: Text -> RE Text Tok
docLineCommentRE _prefix =
TokDocLineComment <$> (Text.concat <$> many anySym) +++ (text "\n")
where
(+++) = liftA2 (<>)
lexer :: Lexer Tok
lexer =
mconcat
[ token (longest $ contra tokRE),
whitespace (longest $ contra whitespaceButNewlineRE),
whitespace (longestShortest (contra $ text "--") (contra . (\_ -> many anySym *> text "\n"))),
whitespace (longestShortest (contra $ text "--[") (contra . (\_ -> many anySym *> text "]--"))),
token (longestShortest (contra $ text "--|") (contra . docLineCommentRE))
]
-- | Scanning
type LexicalError = Pos
-- A TokenStream is a stream of token to replace list, from regex-applicative package
type TokStream = TokenStream (L Tok)
scan :: FilePath -> Text -> Either LexicalError TokStream
scan filepath = filterError . runLexer lexer filepath . Text.unpack
where
filterError :: TokStream -> Either LexicalError TokStream
filterError TsEof = Right TsEof
filterError (TsToken l xs) = TsToken l <$> filterError xs
filterError (TsError (Lexer.LexicalError pos)) = Left pos
instance PrettyToken Tok where
restoreToken = show
prettyTokens (x :| []) = fromMaybe ("'" <> show (unLoc x) <> "'") (prettyToken' (unLoc x))
prettyTokens xs = "\"" <> concatMap (f . unLoc) (NE.toList xs) <> "\""
where
f tok = case prettyToken' tok of
Nothing -> show tok
Just pretty -> "<" <> pretty <> ">"
{- Have to use Maybe here -}
prettyToken' :: Tok -> Maybe String
prettyToken' tok = case tok of
TokNewline -> Just "newline"
TokWhiteSpace -> Just "space"
TokEOF -> Just "end of file"
_ -> Nothing
|
// java/form/src/org/netbeans/modules/form/layoutsupport/LayoutSupportManager.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.form.layoutsupport;
import java.awt.*;
import java.beans.*;
import java.util.*;
import org.openide.nodes.*;
import org.netbeans.modules.form.*;
import org.netbeans.modules.form.codestructure.*;
import org.netbeans.modules.form.layoutsupport.delegates.NullLayoutSupport;
import org.netbeans.modules.form.fakepeer.FakePeerSupport;
/**
* Main class of general layout support infrastructure. Connects form editor
* metadata with specialized LayoutSupportDelegate implementations (layout
* specific functionality is delegated to the right LayoutSupportDelegate).
*
* @author <NAME>
*/
public final class LayoutSupportManager implements LayoutSupportContext {
// possible component resizing directions (bit flag constants)
public static final int RESIZE_UP = 1;
public static final int RESIZE_DOWN = 2;
public static final int RESIZE_LEFT = 4;
public static final int RESIZE_RIGHT = 8;
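// The directions combine as bit flags, e.g. (RESIZE_UP | RESIZE_LEFT) describes resizing from the top-left corner.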
private LayoutSupportDelegate layoutDelegate;
private boolean needInit;
private boolean initializeFromInstance;
private boolean initializeFromCode;
private Node.PropertySet[] propertySets;
private LayoutListener layoutListener;
private RADVisualContainer metaContainer;
private Container primaryContainer; // bean instance from metaContainer
private Container primaryContainerDelegate; // container delegate for it
private CodeStructure codeStructure;
private CodeExpression containerCodeExpression;
private CodeExpression containerDelegateCodeExpression;
// ----------
// initialization
// initialization for a new container, layout delegate is set to null
public LayoutSupportManager(RADVisualContainer container,
CodeStructure codeStructure)
{
this.metaContainer = container;
this.codeStructure = codeStructure;
}
/**
* Creation and initialization of a layout delegate for a new container.
* @return false if a suitable layout delegate is not found
* @throws IllegalArgumentException if the container instance is not empty
*/
public boolean prepareLayoutDelegate(boolean fromCode, boolean initialize)
throws Exception
{
LayoutSupportDelegate delegate = null;
LayoutManager lmInstance = null;
FormModel formModel = metaContainer.getFormModel();
LayoutSupportRegistry layoutRegistry =
LayoutSupportRegistry.getRegistry(formModel);
// first try to find a dedicated layout delegate (for the container)
delegate = layoutRegistry.createSupportForContainer(metaContainer.getBeanClass());
if (delegate != null) {
if (!fromCode && !delegate.checkEmptyContainer(getPrimaryContainer())) {
RuntimeException ex = new IllegalArgumentException(
AbstractLayoutSupport.getBundle().getString("MSG_ERR_NonEmptyContainer")); // NOI18N
throw ex;
}
} else {
// find a general layout delegate (for LayoutManager of the container)
if (fromCode) { // initialization from code
Iterator it = CodeStructure.getDefinedStatementsIterator(
getContainerDelegateCodeExpression());
CodeStatement[] statements =
CodeStructure.filterStatements(
it, AbstractLayoutSupport.getSetLayoutMethod());
if (statements.length > 0) { // setLayout method found
CodeExpressionOrigin layoutOrigin =
statements[0].getStatementParameters()[0].getOrigin();
Class layoutType = layoutOrigin.getType();
delegate = layoutRegistry.createSupportForLayout(layoutType);
if (delegate == null) {
if (layoutOrigin.getType() == LayoutManager.class
&& layoutOrigin.getCreationParameters().length == 0
&& layoutOrigin.getParentExpression() == null
&& "null".equals(layoutOrigin.getJavaCodeString(null, null))) { // NOI18N
// special case of null layout
delegate = new NullLayoutSupport();
} else if (layoutOrigin.getMetaObject() instanceof java.lang.reflect.Constructor
&& layoutOrigin.getCreationParameters().length == 0) {
// likely custom layout originally used as a bean because in palette,
// now not in palette anymore but let's do like if it still was
System.err.println("[WARNING] No support for " + layoutType.getName() + // NOI18N
" was found, trying to use default support like if the layout was in palette as a bean."); // NOI18N
LayoutSupportRegistry.registerSupportForLayout(layoutType.getName(), LayoutSupportRegistry.DEFAULT_SUPPORT);
delegate = new DefaultLayoutSupport(layoutOrigin.getType());
} else {
return false;
}
}
lmInstance = getPrimaryContainerDelegate().getLayout();
}
}
if (delegate == null) { // initialization from LayoutManager instance
Container contDel = getPrimaryContainerDelegate();
if (!(contDel instanceof InvalidComponent)) {
if (contDel.getComponentCount() == 0) {
// we can still handle only empty containers ...
lmInstance = contDel.getLayout();
delegate = lmInstance != null ?
layoutRegistry.createSupportForLayout(lmInstance.getClass()) :
new NullLayoutSupport();
} else {
RuntimeException ex = new IllegalArgumentException(
AbstractLayoutSupport.getBundle().getString("MSG_ERR_NonEmptyContainer")); // NOI18N
throw ex;
}
}
}
}
if (delegate == null)
return false;
if (initialize) {
setLayoutDelegate(delegate, lmInstance, fromCode);
} else {
layoutDelegate = delegate;
needInit = true;
initializeFromInstance = lmInstance != null;
initializeFromCode = fromCode;
}
return true;
}
public void initializeLayoutDelegate() throws Exception {
if (layoutDelegate != null && needInit) {
LayoutManager lmInstance = initializeFromInstance ?
getPrimaryContainerDelegate().getLayout() : null;
layoutDelegate.initialize(this, lmInstance, initializeFromCode);
fillLayout(null);
getPropertySets(); // force properties and listeners creation
needInit = false;
}
}
public void setLayoutDelegate(LayoutSupportDelegate newDelegate,
LayoutManager lmInstance,
boolean fromCode)
throws Exception
{
LayoutConstraints[] oldConstraints;
LayoutSupportDelegate oldDelegate = layoutDelegate;
if (layoutDelegate != null
&& (layoutDelegate != newDelegate || !fromCode))
oldConstraints = removeLayoutDelegate(true);
else
oldConstraints = null;
layoutDelegate = newDelegate;
propertySets = null;
needInit = false;
if (layoutDelegate != null) {
try {
layoutDelegate.initialize(this, lmInstance, fromCode);
if (!fromCode)
fillLayout(oldConstraints);
getPropertySets(); // force properties and listeners creation
}
catch (Exception ex) {
removeLayoutDelegate(false);
layoutDelegate = oldDelegate;
if (layoutDelegate != null)
fillLayout(null);
throw ex;
}
}
}
public LayoutSupportDelegate getLayoutDelegate() {
return layoutDelegate;
}
public static LayoutSupportDelegate getLayoutDelegateForDefaultLayout(
FormModel formModel, LayoutManager layout) throws Exception {
LayoutSupportDelegate defaultLayoutDelegate;
if (layout == null) {
defaultLayoutDelegate = new NullLayoutSupport();
} else {
LayoutSupportRegistry layoutRegistry = LayoutSupportRegistry.getRegistry(formModel);
defaultLayoutDelegate = layoutRegistry.createSupportForLayout(layout.getClass());
if (defaultLayoutDelegate == null) {
defaultLayoutDelegate = new UnknownLayoutSupport();
}
}
return defaultLayoutDelegate;
}
public void setUnknownLayoutDelegate(boolean fromCode) {
try {
setLayoutDelegate(new UnknownLayoutSupport(), null, fromCode);
}
catch (Exception ex) { // nothing should happen, ignore
ex.printStackTrace();
}
}
public boolean isUnknownLayout() {
return layoutDelegate == null
|| layoutDelegate instanceof UnknownLayoutSupport;
}
public boolean isSpecialLayout() {
// Every standard layout manager has its own layout delegate.
// Hence, the DefaultLayoutSupport is used by special layout managers only.
return layoutDelegate instanceof DefaultLayoutSupport;
}
public boolean hasComponentConstraints() {
if (layoutDelegate != null) {
for (int i=0, n=getComponentCount(); i < n; i++) {
if (layoutDelegate.getConstraints(i) != null) {
return true;
}
}
}
return false;
}
// copy layout delegate from another container
public void copyLayoutDelegateFrom(
LayoutSupportManager sourceLayoutSupport,
RADVisualComponent[] newMetaComps)
{
LayoutSupportDelegate sourceDelegate =
sourceLayoutSupport.getLayoutDelegate();
int componentCount = sourceDelegate.getComponentCount();
Container cont = getPrimaryContainer();
Container contDel = getPrimaryContainerDelegate();
if (layoutDelegate != null)
removeLayoutDelegate(false);
CodeExpression[] compExps = new CodeExpression[componentCount];
Component[] primaryComps = new Component[componentCount];
for (int i=0; i < componentCount; i++) {
RADVisualComponent metacomp = newMetaComps[i];
compExps[i] = metacomp.getCodeExpression();
primaryComps[i] = (Component) metacomp.getBeanInstance();
ensureFakePeerAttached(primaryComps[i]);
}
LayoutSupportDelegate newDelegate =
sourceDelegate.cloneLayoutSupport(this, compExps);
newDelegate.setLayoutToContainer(cont, contDel);
newDelegate.addComponentsToContainer(cont, contDel, primaryComps, 0);
layoutDelegate = newDelegate;
// Ensure correct propagation of copied properties (issue 50011, 72351)
try {
layoutDelegate.acceptContainerLayoutChange(null);
} catch (PropertyVetoException pvex) {
// should not happen
}
}
public void clearPrimaryContainer() {
layoutDelegate.clearContainer(getPrimaryContainer(),
getPrimaryContainerDelegate());
}
public RADVisualContainer getMetaContainer() {
return metaContainer;
}
// public boolean supportsArranging() {
// return layoutDelegate instanceof LayoutSupportArranging;
// }
private LayoutConstraints[] removeLayoutDelegate(
boolean extractConstraints)
{
CodeGroup code = layoutDelegate.getLayoutCode();
if (code != null)
CodeStructure.removeStatements(code.getStatementsIterator());
int componentCount = layoutDelegate.getComponentCount();
LayoutConstraints[] constraints = null;
if (componentCount > 0) {
RADVisualComponent[] metacomps = metaContainer.getSubComponents();
if (metacomps.length == componentCount) { // robustness: might be called after failed layout initialization
if (extractConstraints)
constraints = new LayoutConstraints[componentCount];
for (int i=0; i < componentCount; i++) {
LayoutConstraints constr = layoutDelegate.getConstraints(i);
if (extractConstraints)
constraints[i] = constr;
if (constr != null)
metacomps[i].setLayoutConstraints(layoutDelegate.getClass(),
constr);
code = layoutDelegate.getComponentCode(i);
if (code != null)
CodeStructure.removeStatements(code.getStatementsIterator());
}
}
}
layoutDelegate.removeAll();
layoutDelegate.clearContainer(getPrimaryContainer(),
getPrimaryContainerDelegate());
layoutDelegate = null;
return constraints;
}
private void fillLayout(LayoutConstraints[] oldConstraints) {
RADVisualComponent[] metacomps = metaContainer.getSubComponents();
int componentCount = metacomps.length;
CodeExpression[] compExps = new CodeExpression[componentCount];
Component[] designComps = new Component[componentCount];
Component[] primaryComps = new Component[componentCount];
LayoutConstraints[] newConstraints = new LayoutConstraints[componentCount];
FormDesigner designer = FormEditor.getFormDesigner(metaContainer.getFormModel());
for (int i=0; i < componentCount; i++) {
RADVisualComponent metacomp = metacomps[i];
compExps[i] = metacomp.getCodeExpression();
primaryComps[i] = (Component) metacomp.getBeanInstance();
ensureFakePeerAttached(primaryComps[i]);
newConstraints[i] = metacomp.getLayoutConstraints(
layoutDelegate.getClass());
Component comp = designer != null ?
(Component) designer.getComponent(metacomp) : null;
designComps[i] = comp != null ?
comp : (Component) metacomp.getBeanInstance();
}
if (metaContainer.getFormModel().isUndoRedoRecording()) {
layoutDelegate.convertConstraints(oldConstraints,
newConstraints,
designComps);
} // otherwise in undo/redo - don't try to convert constraints
if (componentCount > 0) {
layoutDelegate.acceptNewComponents(compExps, newConstraints, 0);
layoutDelegate.addComponents(compExps, newConstraints, 0);
for (int i=0; i < componentCount; i++)
metacomps[i].resetConstraintsProperties();
}
// setup primary container
Container cont = getPrimaryContainer();
Container contDel = getPrimaryContainerDelegate();
// layoutDelegate.clearContainer(cont, contDel);
if (metaContainer.isDefaultLayoutDelegate(layoutDelegate) && layoutDelegate.getSupportedClass() == null) {
// e.g. UnknownLayoutSupport can't set the layout
contDel.setLayout(metaContainer.getDefaultLayout());
}
layoutDelegate.setLayoutToContainer(cont, contDel);
if (componentCount > 0) {
layoutDelegate.addComponentsToContainer(cont, contDel, primaryComps, 0);
}
}
// ---------
// public API delegated to LayoutSupportDelegate
public boolean isDedicated() {
return layoutDelegate.isDedicated();
}
public Class getSupportedClass() {
return layoutDelegate.getSupportedClass();
}
// node presentation
public boolean shouldHaveNode() {
return layoutDelegate.shouldHaveNode();
}
public String getDisplayName() {
return layoutDelegate.getDisplayName();
}
public Image getIcon(int type) {
return layoutDelegate.getIcon(type);
}
// properties and customizer
public Node.PropertySet[] getPropertySets() {
if (propertySets == null) {
if (layoutDelegate == null) return new Node.PropertySet[0]; // Issue 63916
propertySets = layoutDelegate.getPropertySets();
for (int i=0; i < propertySets.length; i++) {
Node.Property[] props = propertySets[i].getProperties();
for (int j=0; j < props.length; j++)
if (props[j] instanceof FormProperty) {
FormProperty prop = (FormProperty) props[j];
prop.addVetoableChangeListener(getLayoutListener());
prop.addPropertyChangeListener(getLayoutListener());
}
}
}
return propertySets;
}
public Node.Property[] getAllProperties() {
if (layoutDelegate instanceof AbstractLayoutSupport)
return ((AbstractLayoutSupport)layoutDelegate).getAllProperties();
java.util.List<Node.Property> allPropsList = new ArrayList<Node.Property>();
for (int i=0; i < propertySets.length; i++) {
Node.Property[] props = propertySets[i].getProperties();
for (int j=0; j < props.length; j++)
allPropsList.add(props[j]);
}
Node.Property[] allProperties = new Node.Property[allPropsList.size()];
allPropsList.toArray(allProperties);
return allProperties;
}
public Node.Property getLayoutProperty(String name) {
if (layoutDelegate instanceof AbstractLayoutSupport)
return ((AbstractLayoutSupport)layoutDelegate).getProperty(name);
Node.Property[] properties = getAllProperties();
for (int i=0; i < properties.length; i++)
if (name.equals(properties[i].getName()))
return properties[i];
return null;
}
public boolean isLayoutPropertyChangedFromInitial(FormProperty prop) {
if (layoutDelegate instanceof AbstractLayoutSupport) {
return ((AbstractLayoutSupport)layoutDelegate).isPropertyChangedFromInitial(prop);
}
return prop.isChanged();
}
public Class getCustomizerClass() {
return layoutDelegate.getCustomizerClass();
}
public Component getSupportCustomizer() {
return layoutDelegate.getSupportCustomizer();
}
// code meta data
public CodeGroup getLayoutCode() {
return layoutDelegate.getLayoutCode();
}
public CodeGroup getComponentCode(int index) {
return layoutDelegate.getComponentCode(index);
}
public CodeGroup getComponentCode(RADVisualComponent metacomp) {
int index = metaContainer.getIndexOf(metacomp);
return index >= 0 && index < layoutDelegate.getComponentCount() ?
layoutDelegate.getComponentCode(index) : null;
}
public int getComponentCount() {
return layoutDelegate.getComponentCount();
}
// data validation
public void acceptNewComponents(RADVisualComponent[] components,
LayoutConstraints[] constraints,
int index)
{
CodeExpression[] compExps = new CodeExpression[components.length];
for (int i=0; i < components.length; i++)
compExps[i] = components[i].getCodeExpression();
layoutDelegate.acceptNewComponents(compExps, constraints, index);
}
// components adding/removing
public void addComponents(RADVisualComponent[] components,
LayoutConstraints[] constraints,
int index)
{
CodeExpression[] compExps = new CodeExpression[components.length];
Component[] comps = new Component[components.length];
for (int i=0; i < components.length; i++) {
compExps[i] = components[i].getCodeExpression();
comps[i] = (Component) components[i].getBeanInstance();
ensureFakePeerAttached(comps[i]);
}
if (index < 0)
index = layoutDelegate.getComponentCount();
layoutDelegate.addComponents(compExps, constraints, index);
for (int i=0; i < components.length; i++)
components[i].resetConstraintsProperties();
layoutDelegate.addComponentsToContainer(getPrimaryContainer(),
getPrimaryContainerDelegate(),
comps, index);
}
public void removeComponent(RADVisualComponent metacomp, int index) {
// first store constraints in the meta component
LayoutConstraints constr = layoutDelegate.getConstraints(index);
if (constr != null)
metacomp.setLayoutConstraints(layoutDelegate.getClass(), constr);
// remove code
CodeStructure.removeStatements(
layoutDelegate.getComponentCode(index).getStatementsIterator());
// remove the component from layout
layoutDelegate.removeComponent(index);
// remove the component instance from the primary container instance
if (!layoutDelegate.removeComponentFromContainer(
getPrimaryContainer(),
getPrimaryContainerDelegate(),
(Component)metacomp.getBeanInstance()))
{ // layout delegate does not support removing individual components,
// so we clear the container and add the remaining components again
layoutDelegate.clearContainer(getPrimaryContainer(),
getPrimaryContainerDelegate());
RADVisualComponent[] metacomps = metaContainer.getSubComponents();
if (metacomps.length > 1) {
                // we rely on metacomp not having been removed from the model yet
Component[] comps = new Component[metacomps.length-1];
for (int i=0; i < metacomps.length; i++) {
if (i != index) {
Component comp = (Component) metacomps[i].getBeanInstance();
ensureFakePeerAttached(comp);
comps[i < index ? i : i-1] = comp;
}
}
layoutDelegate.addComponentsToContainer(
getPrimaryContainer(),
getPrimaryContainerDelegate(),
comps,
0);
}
}
}
public void removeAll() {
// first store constraints in meta components
RADVisualComponent[] components = metaContainer.getSubComponents();
for (int i=0; i < components.length; i++) {
LayoutConstraints constr =
layoutDelegate.getConstraints(i);
if (constr != null)
components[i].setLayoutConstraints(layoutDelegate.getClass(),
constr);
}
// remove code of all components
for (int i=0, n=layoutDelegate.getComponentCount(); i < n; i++)
CodeStructure.removeStatements(
layoutDelegate.getComponentCode(i).getStatementsIterator());
// remove components from layout
layoutDelegate.removeAll();
// clear the primary container instance
layoutDelegate.clearContainer(getPrimaryContainer(),
getPrimaryContainerDelegate());
}
public boolean isLayoutChanged() {
Container defaultContainer = (Container)
BeanSupport.getDefaultInstance(metaContainer.getBeanClass());
Container defaultContDelegate =
metaContainer.getContainerDelegate(defaultContainer);
return layoutDelegate.isLayoutChanged(defaultContainer,
defaultContDelegate);
}
// managing constraints
public LayoutConstraints getConstraints(int index) {
return layoutDelegate.getConstraints(index);
}
public LayoutConstraints getConstraints(RADVisualComponent metacomp) {
if (layoutDelegate == null)
return null;
int index = metaContainer.getIndexOf(metacomp);
return index >= 0 && index < layoutDelegate.getComponentCount() ?
layoutDelegate.getConstraints(index) : null;
}
public static LayoutConstraints storeConstraints(
RADVisualComponent metacomp)
{
RADVisualContainer parent = metacomp.getParentContainer();
if (parent == null)
return null;
LayoutSupportManager layoutSupport = parent.getLayoutSupport();
if (layoutSupport == null)
return null;
LayoutConstraints constr = layoutSupport.getConstraints(metacomp);
if (constr != null)
metacomp.setLayoutConstraints(
layoutSupport.getLayoutDelegate().getClass(),
constr);
return constr;
}
public LayoutConstraints getStoredConstraints(RADVisualComponent metacomp) {
return metacomp.getLayoutConstraints(layoutDelegate.getClass());
}
// managing live components
public void setLayoutToContainer(Container container,
Container containerDelegate)
{
layoutDelegate.setLayoutToContainer(container, containerDelegate);
}
public void addComponentsToContainer(Container container,
Container containerDelegate,
Component[] components,
int index)
{
layoutDelegate.addComponentsToContainer(container, containerDelegate,
components, index);
}
public boolean removeComponentFromContainer(Container container,
Container containerDelegate,
Component component)
{
return layoutDelegate.removeComponentFromContainer(
container, containerDelegate, component);
}
public boolean clearContainer(Container container,
Container containerDelegate)
{
return layoutDelegate.clearContainer(container, containerDelegate);
}
// drag and drop support
public LayoutConstraints getNewConstraints(Container container,
Container containerDelegate,
Component component,
int index,
Point posInCont,
Point posInComp)
{
LayoutConstraints constraints = layoutDelegate.getNewConstraints(container, containerDelegate,
component, index,
posInCont, posInComp);
String context = null;
Object[] params = null;
if (layoutDelegate instanceof AbstractLayoutSupport) {
AbstractLayoutSupport support = (AbstractLayoutSupport)layoutDelegate;
context = support.getAssistantContext();
params = support.getAssistantParams();
}
context = (context == null) ? "generalPosition" : context; // NOI18N
FormEditor.getAssistantModel(metaContainer.getFormModel()).setContext(context, params);
return constraints;
}
public int getNewIndex(Container container,
Container containerDelegate,
Component component,
int index,
Point posInCont,
Point posInComp)
{
return layoutDelegate.getNewIndex(container, containerDelegate,
component, index,
posInCont, posInComp);
}
public boolean paintDragFeedback(Container container,
Container containerDelegate,
Component component,
LayoutConstraints newConstraints,
int newIndex,
Graphics g)
{
return layoutDelegate.paintDragFeedback(container, containerDelegate,
component,
newConstraints, newIndex,
g);
}
// resizing support
public int getResizableDirections(Container container,
Container containerDelegate,
Component component,
int index)
{
return layoutDelegate.getResizableDirections(container,
containerDelegate,
component, index);
}
public LayoutConstraints getResizedConstraints(Container container,
Container containerDelegate,
Component component,
int index,
Rectangle originalBounds,
Insets sizeChanges,
Point posInCont)
{
return layoutDelegate.getResizedConstraints(container,
containerDelegate,
component, index,
originalBounds,
sizeChanges,
posInCont);
}
// arranging support
public void processMouseClick(Point p,
Container cont,
Container contDelegate)
{
layoutDelegate.processMouseClick(p, cont, contDelegate);
}
// arranging support
public void selectComponent(int index) {
layoutDelegate.selectComponent(index);
}
// arranging support
public void arrangeContainer(Container container,
Container containerDelegate)
{
layoutDelegate.arrangeContainer(container, containerDelegate);
}
// -----------
// API for layout delegates (LayoutSupportContext implementation)
@Override
public CodeStructure getCodeStructure() {
return codeStructure;
}
@Override
public CodeExpression getContainerCodeExpression() {
if (containerCodeExpression == null) {
containerCodeExpression = metaContainer.getCodeExpression();
containerDelegateCodeExpression = null;
}
return containerCodeExpression;
}
@Override
public CodeExpression getContainerDelegateCodeExpression() {
if (containerDelegateCodeExpression == null) {
containerDelegateCodeExpression =
containerDelegateCodeExpression(metaContainer, codeStructure);
}
return containerDelegateCodeExpression;
}
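    // Returns the code expression representing the container delegate of the
    // given container; if no such expression exists yet, it is created from the
    // container delegate getter method (or the container expression itself is
    // returned when there is no special delegate).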
public static CodeExpression containerDelegateCodeExpression(
RADVisualContainer metaContainer,
CodeStructure codeStructure)
{
CodeExpression containerCodeExpression = metaContainer.getCodeExpression();
CodeExpression containerDelegateCodeExpression;
java.lang.reflect.Method delegateGetter =
metaContainer.getContainerDelegateMethod();
if (delegateGetter != null) { // there should be a container delegate
Iterator it = CodeStructure.getDefinedExpressionsIterator(
containerCodeExpression);
CodeExpression[] expressions = CodeStructure.filterExpressions(
it, delegateGetter);
if (expressions.length > 0) {
                // the expression for the container delegate already exists
containerDelegateCodeExpression = expressions[0];
}
            else { // create a new expression for the container delegate
CodeExpressionOrigin origin = CodeStructure.createOrigin(
containerCodeExpression,
delegateGetter,
null);
containerDelegateCodeExpression =
codeStructure.createExpression(origin);
}
}
else // no special container delegate
containerDelegateCodeExpression = containerCodeExpression;
return containerDelegateCodeExpression;
}
// return container instance of meta container
@Override
public Container getPrimaryContainer() {
return (Container) metaContainer.getBeanInstance();
}
// return container delegate of container instance of meta container
@Override
public Container getPrimaryContainerDelegate() {
Container defCont = (Container) metaContainer.getBeanInstance();
if (primaryContainerDelegate == null || primaryContainer != defCont) {
primaryContainer = defCont;
primaryContainerDelegate =
metaContainer.getContainerDelegate(defCont);
}
return primaryContainerDelegate;
}
// return initial layout of the primary container delegate
@Override
public LayoutManager getDefaultLayoutInstance() {
return metaContainer.getDefaultLayout();
}
// return component instance of meta component
@Override
public Component getPrimaryComponent(int index) {
return (Component) metaContainer.getSubComponent(index).getBeanInstance();
}
@Override
public void updatePrimaryContainer() {
Container cont = getPrimaryContainer();
Container contDel = getPrimaryContainerDelegate();
layoutDelegate.clearContainer(cont, contDel);
layoutDelegate.setLayoutToContainer(cont, contDel);
RADVisualComponent[] components = metaContainer.getSubComponents();
if (components.length > 0) {
Component[] comps = new Component[components.length];
for (int i=0; i < components.length; i++) {
comps[i] = (Component) components[i].getBeanInstance();
ensureFakePeerAttached(comps[i]);
}
layoutDelegate.addComponentsToContainer(cont, contDel, comps, 0);
}
}
@Override
public void containerLayoutChanged(PropertyChangeEvent ev)
throws PropertyVetoException
{
if (ev != null && ev.getPropertyName() != null) {
layoutDelegate.acceptContainerLayoutChange(getEventWithValues(ev));
FormModel formModel = metaContainer.getFormModel();
formModel.fireContainerLayoutChanged(metaContainer,
ev.getPropertyName(),
ev.getOldValue(),
ev.getNewValue());
}
else propertySets = null;
LayoutNode node = metaContainer.getLayoutNodeReference();
if (node != null) {
// propagate the change to node
if (ev != null && ev.getPropertyName() != null)
node.fireLayoutPropertiesChange();
else
node.fireLayoutPropertySetsChange();
}
}
@Override
public void componentLayoutChanged(int index, PropertyChangeEvent ev)
throws PropertyVetoException
{
RADVisualComponent metacomp = metaContainer.getSubComponent(index);
if (ev != null && ev.getPropertyName() != null) {
layoutDelegate.acceptComponentLayoutChange(index,
getEventWithValues(ev));
FormModel formModel = metaContainer.getFormModel();
formModel.fireComponentLayoutChanged(metacomp,
ev.getPropertyName(),
ev.getOldValue(),
ev.getNewValue());
if (metacomp.getNodeReference() != null) // propagate the change to node
metacomp.getNodeReference().firePropertyChangeHelper(
// null, null, null);
ev.getPropertyName(),
ev.getOldValue(),
ev.getNewValue());
}
else {
if (metacomp.getNodeReference() != null) // propagate the change to node
metacomp.getNodeReference().fireComponentPropertySetsChange();
metacomp.resetConstraintsProperties();
}
}
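    // Unwraps FormProperty.ValueWithEditor values from the event so that
    // listeners receive the plain property values.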
private static PropertyChangeEvent getEventWithValues(PropertyChangeEvent ev) {
Object oldVal = ev.getOldValue();
Object newVal = ev.getNewValue();
if (oldVal instanceof FormProperty.ValueWithEditor)
ev = new PropertyChangeEvent(
ev.getSource(),
ev.getPropertyName(),
((FormProperty.ValueWithEditor)oldVal).getValue(),
((FormProperty.ValueWithEditor)newVal).getValue());
return ev;
}
// ---------
private LayoutListener getLayoutListener() {
if (layoutListener == null)
layoutListener = new LayoutListener();
return layoutListener;
}
private class LayoutListener implements VetoableChangeListener,
PropertyChangeListener
{
@Override
public void vetoableChange(PropertyChangeEvent ev)
throws PropertyVetoException
{
Object source = ev.getSource();
String eventName = ev.getPropertyName();
if (source instanceof FormProperty
&& (FormProperty.PROP_VALUE.equals(eventName)
|| FormProperty.PROP_VALUE_AND_EDITOR.equals(eventName)))
{
ev = new PropertyChangeEvent(layoutDelegate,
((FormProperty)source).getName(),
ev.getOldValue(),
ev.getNewValue());
containerLayoutChanged(ev);
}
}
@Override
public void propertyChange(PropertyChangeEvent ev) {
Object source = ev.getSource();
if (source instanceof FormProperty
&& FormProperty.CURRENT_EDITOR.equals(ev.getPropertyName()))
{
ev = new PropertyChangeEvent(layoutDelegate,
null, null, null);
try {
containerLayoutChanged(ev);
}
catch (PropertyVetoException ex) {} // should not happen
}
}
}
private static void ensureFakePeerAttached(Component comp) {
// This method is called for components to be added to a container.
        // It might happen that the component is still (erroneously) in another
        // container; when it later gets removed from that container in the course
        // of being added to the new one, its peer would be nulled. To prevent
        // this, remove the component from its parent before attaching the fake
        // peer. (For bug 115431.)
if (comp != null && comp.getParent() != null) {
comp.getParent().remove(comp);
}
FakePeerSupport.attachFakePeer(comp);
if (comp instanceof Container)
FakePeerSupport.attachFakePeerRecursively((Container)comp);
}
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
csrsigningconfig "k8s.io/kubernetes/pkg/controller/certificates/signer/config"
)
func TestCertSpecified(t *testing.T) {
allConfig := csrsigningconfig.CSRSigningControllerConfiguration{
ClusterSigningCertFile: "/cluster-signing-cert",
ClusterSigningKeyFile: "/cluster-signing-key",
ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour},
KubeletServingSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-serving/cert-file",
KeyFile: "/cluster-signing-kubelet-serving/key-file",
},
KubeletClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-client/cert-file",
KeyFile: "/cluster-signing-kubelet-client/key-file",
},
KubeAPIServerClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kube-apiserver-client/cert-file",
KeyFile: "/cluster-signing-kube-apiserver-client/key-file",
},
LegacyUnknownSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-legacy-unknown/cert-file",
KeyFile: "/cluster-signing-legacy-unknown/key-file",
},
}
defaultOnly := csrsigningconfig.CSRSigningControllerConfiguration{
ClusterSigningCertFile: "/cluster-signing-cert",
ClusterSigningKeyFile: "/cluster-signing-key",
ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour},
}
specifiedOnly := csrsigningconfig.CSRSigningControllerConfiguration{
KubeletServingSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-serving/cert-file",
KeyFile: "/cluster-signing-kubelet-serving/key-file",
},
KubeletClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-client/cert-file",
KeyFile: "/cluster-signing-kubelet-client/key-file",
},
KubeAPIServerClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kube-apiserver-client/cert-file",
KeyFile: "/cluster-signing-kube-apiserver-client/key-file",
},
LegacyUnknownSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-legacy-unknown/cert-file",
KeyFile: "/cluster-signing-legacy-unknown/key-file",
},
}
halfASpecified := csrsigningconfig.CSRSigningControllerConfiguration{
ClusterSigningCertFile: "/cluster-signing-cert",
ClusterSigningKeyFile: "/cluster-signing-key",
ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour},
KubeletServingSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-serving/cert-file",
KeyFile: "/cluster-signing-kubelet-serving/key-file",
},
KubeletClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-client/cert-file",
KeyFile: "/cluster-signing-kubelet-client/key-file",
},
}
halfBSpecified := csrsigningconfig.CSRSigningControllerConfiguration{
ClusterSigningCertFile: "/cluster-signing-cert",
ClusterSigningKeyFile: "/cluster-signing-key",
ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour},
KubeAPIServerClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kube-apiserver-client/cert-file",
KeyFile: "/cluster-signing-kube-apiserver-client/key-file",
},
LegacyUnknownSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-legacy-unknown/cert-file",
KeyFile: "/cluster-signing-legacy-unknown/key-file",
},
}
tests := []struct {
name string
config csrsigningconfig.CSRSigningControllerConfiguration
specifiedFn func(config csrsigningconfig.CSRSigningControllerConfiguration) bool
expectedSpecified bool
filesFn func(config csrsigningconfig.CSRSigningControllerConfiguration) (string, string)
expectedCert string
expectedKey string
}{
{
name: "allConfig-KubeletServingSignerFilesSpecified",
config: allConfig,
specifiedFn: areKubeletServingSignerFilesSpecified,
expectedSpecified: true,
filesFn: getKubeletServingSignerFiles,
expectedCert: "/cluster-signing-kubelet-serving/cert-file",
expectedKey: "/cluster-signing-kubelet-serving/key-file",
},
{
name: "defaultOnly-KubeletServingSignerFilesSpecified",
config: defaultOnly,
specifiedFn: areKubeletServingSignerFilesSpecified,
expectedSpecified: false,
filesFn: getKubeletServingSignerFiles,
expectedCert: "/cluster-signing-cert",
expectedKey: "/cluster-signing-key",
},
{
name: "specifiedOnly-KubeletServingSignerFilesSpecified",
config: specifiedOnly,
specifiedFn: areKubeletServingSignerFilesSpecified,
expectedSpecified: true,
filesFn: getKubeletServingSignerFiles,
expectedCert: "/cluster-signing-kubelet-serving/cert-file",
expectedKey: "/cluster-signing-kubelet-serving/key-file",
},
{
name: "halfASpecified-KubeletServingSignerFilesSpecified",
config: halfASpecified,
specifiedFn: areKubeletServingSignerFilesSpecified,
expectedSpecified: true,
filesFn: getKubeletServingSignerFiles,
expectedCert: "/cluster-signing-kubelet-serving/cert-file",
expectedKey: "/cluster-signing-kubelet-serving/key-file",
},
{
name: "halfBSpecified-KubeletServingSignerFilesSpecified",
config: halfBSpecified,
specifiedFn: areKubeletServingSignerFilesSpecified,
expectedSpecified: false,
filesFn: getKubeletServingSignerFiles,
expectedCert: "",
expectedKey: "",
},
{
name: "allConfig-KubeletClientSignerFiles",
config: allConfig,
specifiedFn: areKubeletClientSignerFilesSpecified,
expectedSpecified: true,
filesFn: getKubeletClientSignerFiles,
expectedCert: "/cluster-signing-kubelet-client/cert-file",
expectedKey: "/cluster-signing-kubelet-client/key-file",
},
{
name: "defaultOnly-KubeletClientSignerFiles",
config: defaultOnly,
specifiedFn: areKubeletClientSignerFilesSpecified,
expectedSpecified: false,
filesFn: getKubeletClientSignerFiles,
expectedCert: "/cluster-signing-cert",
expectedKey: "/cluster-signing-key",
},
{
name: "specifiedOnly-KubeletClientSignerFiles",
config: specifiedOnly,
specifiedFn: areKubeletClientSignerFilesSpecified,
expectedSpecified: true,
filesFn: getKubeletClientSignerFiles,
expectedCert: "/cluster-signing-kubelet-client/cert-file",
expectedKey: "/cluster-signing-kubelet-client/key-file",
},
{
name: "halfASpecified-KubeletClientSignerFiles",
config: halfASpecified,
specifiedFn: areKubeletClientSignerFilesSpecified,
expectedSpecified: true,
filesFn: getKubeletClientSignerFiles,
expectedCert: "/cluster-signing-kubelet-client/cert-file",
expectedKey: "/cluster-signing-kubelet-client/key-file",
},
{
name: "halfBSpecified-KubeletClientSignerFiles",
config: halfBSpecified,
specifiedFn: areKubeletClientSignerFilesSpecified,
expectedSpecified: false,
filesFn: getKubeletClientSignerFiles,
expectedCert: "",
expectedKey: "",
},
{
			name:              "allConfig-KubeAPIServerClientSignerFiles",
config: allConfig,
specifiedFn: areKubeAPIServerClientSignerFilesSpecified,
expectedSpecified: true,
filesFn: getKubeAPIServerClientSignerFiles,
expectedCert: "/cluster-signing-kube-apiserver-client/cert-file",
expectedKey: "/cluster-signing-kube-apiserver-client/key-file",
},
{
			name:              "defaultOnly-KubeAPIServerClientSignerFiles",
config: defaultOnly,
specifiedFn: areKubeAPIServerClientSignerFilesSpecified,
expectedSpecified: false,
filesFn: getKubeAPIServerClientSignerFiles,
expectedCert: "/cluster-signing-cert",
expectedKey: "/cluster-signing-key",
},
{
			name:              "specifiedOnly-KubeAPIServerClientSignerFiles",
config: specifiedOnly,
specifiedFn: areKubeAPIServerClientSignerFilesSpecified,
expectedSpecified: true,
filesFn: getKubeAPIServerClientSignerFiles,
expectedCert: "/cluster-signing-kube-apiserver-client/cert-file",
expectedKey: "/cluster-signing-kube-apiserver-client/key-file",
},
{
			name:              "halfASpecified-KubeAPIServerClientSignerFiles",
config: halfASpecified,
specifiedFn: areKubeAPIServerClientSignerFilesSpecified,
expectedSpecified: false,
filesFn: getKubeAPIServerClientSignerFiles,
expectedCert: "",
expectedKey: "",
},
{
			name:              "halfBSpecified-KubeAPIServerClientSignerFiles",
config: halfBSpecified,
specifiedFn: areKubeAPIServerClientSignerFilesSpecified,
expectedSpecified: true,
filesFn: getKubeAPIServerClientSignerFiles,
expectedCert: "/cluster-signing-kube-apiserver-client/cert-file",
expectedKey: "/cluster-signing-kube-apiserver-client/key-file",
},
{
name: "allConfig-LegacyUnknownSignerFiles",
config: allConfig,
specifiedFn: areLegacyUnknownSignerFilesSpecified,
expectedSpecified: true,
filesFn: getLegacyUnknownSignerFiles,
expectedCert: "/cluster-signing-legacy-unknown/cert-file",
expectedKey: "/cluster-signing-legacy-unknown/key-file",
},
{
name: "defaultOnly-LegacyUnknownSignerFiles",
config: defaultOnly,
specifiedFn: areLegacyUnknownSignerFilesSpecified,
expectedSpecified: false,
filesFn: getLegacyUnknownSignerFiles,
expectedCert: "/cluster-signing-cert",
expectedKey: "/cluster-signing-key",
},
{
name: "specifiedOnly-LegacyUnknownSignerFiles",
config: specifiedOnly,
specifiedFn: areLegacyUnknownSignerFilesSpecified,
expectedSpecified: true,
filesFn: getLegacyUnknownSignerFiles,
expectedCert: "/cluster-signing-legacy-unknown/cert-file",
expectedKey: "/cluster-signing-legacy-unknown/key-file",
},
{
name: "halfASpecified-LegacyUnknownSignerFiles",
config: halfASpecified,
specifiedFn: areLegacyUnknownSignerFilesSpecified,
expectedSpecified: false,
filesFn: getLegacyUnknownSignerFiles,
expectedCert: "",
expectedKey: "",
},
{
name: "halfBSpecified-LegacyUnknownSignerFiles",
config: halfBSpecified,
specifiedFn: areLegacyUnknownSignerFilesSpecified,
expectedSpecified: true,
filesFn: getLegacyUnknownSignerFiles,
expectedCert: "/cluster-signing-legacy-unknown/cert-file",
expectedKey: "/cluster-signing-legacy-unknown/key-file",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actualSpecified := test.specifiedFn(test.config)
if actualSpecified != test.expectedSpecified {
t.Error(actualSpecified)
}
actualCert, actualKey := test.filesFn(test.config)
if actualCert != test.expectedCert {
t.Error(actualCert)
}
if actualKey != test.expectedKey {
t.Error(actualKey)
}
})
}
}
|
// tr translates string and replaces meta value placeholder with values
//
// This function is auto-generated.
//
func (p attachmentActionProps) tr(in string, err error) string {
var (
pairs = []string{"{err}"}
fns = func(ii ...interface{}) string {
for _, i := range ii {
if s := fmt.Sprintf("%v", i); len(s) > 0 {
return s
}
}
return ""
}
)
if err != nil {
for {
ue := errors.Unwrap(err)
if ue == nil {
break
}
err = ue
}
pairs = append(pairs, err.Error())
} else {
pairs = append(pairs, "nil")
}
pairs = append(pairs, "{size}", fns(p.size))
pairs = append(pairs, "{name}", fns(p.name))
pairs = append(pairs, "{mimetype}", fns(p.mimetype))
pairs = append(pairs, "{url}", fns(p.url))
if p.attachment != nil {
pairs = append(
pairs,
"{attachment}",
fns(
p.attachment.Name,
p.attachment.Kind,
p.attachment.Url,
p.attachment.PreviewUrl,
p.attachment.Meta,
p.attachment.OwnerID,
p.attachment.ID,
p.attachment.NamespaceID,
),
)
pairs = append(pairs, "{attachment.name}", fns(p.attachment.Name))
pairs = append(pairs, "{attachment.kind}", fns(p.attachment.Kind))
pairs = append(pairs, "{attachment.url}", fns(p.attachment.Url))
pairs = append(pairs, "{attachment.previewUrl}", fns(p.attachment.PreviewUrl))
pairs = append(pairs, "{attachment.meta}", fns(p.attachment.Meta))
pairs = append(pairs, "{attachment.ownerID}", fns(p.attachment.OwnerID))
pairs = append(pairs, "{attachment.ID}", fns(p.attachment.ID))
pairs = append(pairs, "{attachment.namespaceID}", fns(p.attachment.NamespaceID))
}
if p.filter != nil {
pairs = append(
pairs,
"{filter}",
fns(
p.filter.Filter,
p.filter.Kind,
p.filter.Sort,
),
)
pairs = append(pairs, "{filter.filter}", fns(p.filter.Filter))
pairs = append(pairs, "{filter.kind}", fns(p.filter.Kind))
pairs = append(pairs, "{filter.sort}", fns(p.filter.Sort))
}
if p.namespace != nil {
pairs = append(
pairs,
"{namespace}",
fns(
p.namespace.Name,
p.namespace.Slug,
p.namespace.ID,
),
)
pairs = append(pairs, "{namespace.name}", fns(p.namespace.Name))
pairs = append(pairs, "{namespace.slug}", fns(p.namespace.Slug))
pairs = append(pairs, "{namespace.ID}", fns(p.namespace.ID))
}
if p.record != nil {
pairs = append(
pairs,
"{record}",
fns(
p.record.ID,
p.record.ModuleID,
p.record.NamespaceID,
),
)
pairs = append(pairs, "{record.ID}", fns(p.record.ID))
pairs = append(pairs, "{record.moduleID}", fns(p.record.ModuleID))
pairs = append(pairs, "{record.namespaceID}", fns(p.record.NamespaceID))
}
if p.page != nil {
pairs = append(
pairs,
"{page}",
fns(
p.page.Handle,
p.page.Title,
p.page.ID,
),
)
pairs = append(pairs, "{page.handle}", fns(p.page.Handle))
pairs = append(pairs, "{page.title}", fns(p.page.Title))
pairs = append(pairs, "{page.ID}", fns(p.page.ID))
}
if p.module != nil {
pairs = append(
pairs,
"{module}",
fns(
p.module.Handle,
p.module.Name,
p.module.ID,
),
)
pairs = append(pairs, "{module.handle}", fns(p.module.Handle))
pairs = append(pairs, "{module.name}", fns(p.module.Name))
pairs = append(pairs, "{module.ID}", fns(p.module.ID))
}
return strings.NewReplacer(pairs...).Replace(in)
} |
The primary purpose of this study was to investigate the effects of 8 weeks of a MIPS in resistance-trained individuals during a periodized resistance training program on skeletal muscle hypertrophy, lean body mass, and strength relative to a placebo matched control. The primary findings of this research were in congruence with our hypotheses that Xpand® 2X supplementation can improve adaptations in skeletal muscle hypertrophy, LBM, and strength.
Skeletal muscle hypertrophy and lean body mass
This MIPS contains a proprietary blend of ingredients previously reported to augment the accretion of skeletal muscle. For example, creatine monohydrate has been reported as the most effective ergogenic aid currently available for increasing LBM and high-intensity exercise capacity, particularly in untrained individuals[14]. Creatine and its various forms have been thoroughly researched, yet to date no study has shown any form of creatine to be superior to creatine monohydrate, which was used in this study[23, 33]. Supplementation with creatine can increase total resistance training volume via ATP re-synthesis[14], and total training volume has been closely linked with skeletal muscle accretion. Additionally, creatine supplementation has been demonstrated to increase the activation of satellite cells and myonuclei in muscle following chronic resistance training[15]. Moreover, it is conceivable that the osmotic pressure created by creatine increases the hydration status of cells, resulting in potentially hypertrophic effects[16]. This mechanism of action not only contributes to creatine's effect on strength but may also benefit almost every body system, including the brain, bones, muscles, and liver[17]. Lastly, long-term studies have observed that those supplementing with creatine experience 200% increases in LBM compared to placebo[14].
Branched chain amino acids have previously been shown to be efficacious in the accretion of skeletal muscle mass[2]. One BCAA of particular interest is leucine, which has been shown to increase muscle protein synthesis (MPS) without the presence of the other essential amino acids[18]. Additionally, Karlsson et al.[19] found that supplementation with BCAAs during resistance exercise results in greater phosphorylation of ribosomal S6 kinase, a rate-limiting enzyme in the signaling network responsible for regulation of protein synthesis in skeletal muscle. Moreover, BCAAs seem to decrease soreness after eccentric exercise[20], and they prevent declines in both testosterone and power following an overreaching cycle[21].
Beta-Alanine supplementation has consistently been demonstrated to augment muscle carnosine concentrations in humans[22–25]. Harris et al.[22] concluded that carnosine plays an essential role as an intracellular buffer within the skeletal muscle of humans. More importantly, beta-alanine supplementation has been shown to enhance physical performance during high intensity exercise bouts while also delaying the onset of neuromuscular fatigue[26].
Agmatine is a derivative of the amino acid arginine. Agmatine has been studied for its impact on nitric oxide, wellbeing, and hormone status[27, 28]. Agmatine has been noted to support nitric oxide (NO) production via stimulation of endothelial nitric oxide synthase (eNOS)[29–31]. This process is essential for the proper functioning of the polyamine biosynthetic pathways[29–31]. The body's organs require polyamines for their growth, renewal, and metabolism. Polyamines also have a profound stabilizing effect on cellular DNA and are essential to the healthy function of the nervous system[29, 30]. Therefore, these pathways, although not fully elucidated, play an important role in normal cell homeostasis.
Lastly, Creatinol-O-Phosphate (COP) is known primarily for its abilities as an intracellular buffer. Creatinol-O-Phosphate has been shown to assist in stabilizing intracellular and extracellular pH levels, ultimately prolonging anaerobic glycolysis in the presence of lactic acid[7]. Creatinol-O-Phosphate has also been shown to activate satellite cells in skeletal muscle, theoretically increasing their capacity for muscle growth[7]. To date, this is the first study that we are aware of to analyze this specific combination of ingredients. |
“Potato Chip” isn’t the kind of thing that’s going to run in a “best of” montage during, say, SNL’s 40th anniversary show. Have you ever heard how certain comedians are referred to as “a comedian’s comedian,” in that the material makes other comedians laugh? “Potato Chip” is a lot like that. It’s the definition of a cult favorite.
On Dec. 5, 2009, SNL aired a show, hosted by Blake Lively, that featured Andy Samberg's Swedish Chef in the monologue and a sketch about Tiger Woods' infidelity (it seems crazy that that was seven years ago). Then the last sketch of the night aired.
The last sketch of the night — often referred to as the “10 to one” sketch, signifying about the time it airs on the East Coast — can often be, well, odd. It’s where things are often slotted that the cast likes, but mainstream audiences might not appreciate closer to 11:30 p.m. The quintessential “10 to one” sketch, written by Will Forte and John Solomon, is “Potato Chip.”
The sketch opens with an establishing shot of NASA, which then fades into a sad-looking office where an older Foghorn Leghorn-sounding man, played by Jason Sudeikis, is being interviewed for a job as an astronaut by a high-pitched, raspy-voiced man, played by Will Forte, who has a large bowl of potato chips on his desk. (Forte is raspy-voiced in this sketch because he blew out his voice performing it at dress rehearsal. I have seen the dress rehearsal version, and the best way to describe it is to imagine what Forte does here, only amped up by a few degrees.)
Forte’s character has one rule: Do not take his potato chips. When Forte leaves the room, Sudeikis takes a potato chip. When Forte returns, a non-stop yelling match between Forte, Lively (who plays his assistant) and Sudeikis commences, ending with Sudeikis regurgitating the potato chip into Forte’s hand.
Ahead, the creative forces behind this sketch — Will Forte, Jason Sudeikis, John Solomon, and head writer Seth Meyers — reflect on what they all feel is one of the strangest things they’ve ever put on the air… a sketch that was actually performed three times in front of an audience, as it had been cut at dress rehearsal a month earlier when Taylor Swift hosted the show. (And maybe strangest of all: Sudeikis reveals that his would-be NASA employee character lived on to see another day as the judge in “Maine Justice.”) |
/* choose the suitable input current for charger */
static int hi6522_input_current_optimize(struct ico_input *input,
struct ico_output *output)
{
struct hi6522_device_info *di = g_hi6522_dev;
int bat_voltage = hisi_battery_voltage();
int bat_exist = is_hisi_battery_exist();
int bat_temp = hisi_battery_temperature();
int avg_voltage;
int ret;
unsigned int state = 0;
if ((di == NULL) || (input == NULL) || (output == NULL))
return -1;
if ((!bat_exist) || (input->charger_type != CHARGER_TYPE_STANDARD))
return -1;
ret = hi6522_get_charge_state(&state);
if (ret < 0) {
scharger_err("[%s]:get_charge_state fail!!\n", __func__);
return -1;
}
if (bat_temp > BAT_TEMP_50 || bat_temp < BAT_TEMP_0 ||
(state & CHAGRE_STATE_CHRG_DONE)) {
scharger_err("[%s]:batt_temp=%d,charge_state=%u\n",
__func__, bat_temp, state);
return -1;
}
avg_voltage =
mt_battery_average_method(&battery_voltage_buffer[0], bat_voltage);
standard_chk(&avg_voltage, di);
output->input_current = di->ico_iin;
return 0;
} |
{-
Construct a completely balanced binary tree.
In a completely balanced binary tree, the following property holds for every node:
The number of nodes in its left subtree and the number of nodes in its right subtree
are almost equal, which means their difference is not greater than one.
Write a function complBalTree to construct completely balanced binary
trees for a given number of nodes.
-}
data Tree a = Empty | Branch a (Tree a) (Tree a)
deriving (Show, Eq)
complBalTree :: Int -> [Tree Char]
complBalTree 0 = [Empty]
complBalTree n = let (q, r) = (n - 1) `quotRem` 2
in [Branch 'x' left right | i <- [q..(q + r)],
left <- complBalTree i,
right <- complBalTree (n - i - 1)] |
import logging
from io import StringIO
from typing import Any


class CustomLogger:
"""
This shims `logging.Logger` without using inheritance, since we want to
leverage `logging.getLogger`.
"""
def __init__(
self,
log: logging.Logger,
log_format: str = '%(message)s',
) -> None:
# To prevent infinite recursion, we explicitly call `object`'s setattr.
# This also sets it as an instance variable, rather than calling the
# CustomLogger's `__setattr__`, which will set it as a variable on
# `self.log`.
super().__setattr__('log', log)
super().__setattr__('log_format', log_format)
def set_debug_level(self, debug_level: int) -> None:
"""
:param debug_level: configure verbosity of log (between 0-2)
"""
mapping = {
# Anything over INFO level.
0: logging.INFO + 1,
1: logging.INFO,
2: logging.DEBUG,
}
self.setLevel(
mapping[min(debug_level, 2)],
)
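        # Hypothetical usage sketch: wrapping an ordinary logger, e.g.
        # CustomLogger(logging.getLogger(__name__)), set_debug_level(0) keeps
        # output quiet (only messages above INFO), while set_debug_level(2)
        # enables full DEBUG output.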
def clear_stream(self) -> None:
"""It's easier to create a new stream, rather than clearing it."""
        # Attach the stream to the log instance, so it can be accessed publicly.
self.stream = StringIO()
handler = logging.StreamHandler(self.stream)
handler.setFormatter(
logging.Formatter(self.log_format),
)
if not self.handlers:
self.addHandler(handler)
else:
self.handlers[0] = handler
def __getattr__(self, name: str) -> Any:
return getattr(self.log, name)
def __setattr__(self, name: str, value: Any) -> None:
setattr(self.log, name, value) |
Hey All,

It seemed like an impossible task: to get more than two audio channels into and out from the Pi over the GPIO header. Lots of people wanted that functionality but no one could work out how to do it. I decided this product would have the potential to completely enable and transform other people's projects. Not only that, if it could be done on the Pi, it could be done on most other embedded boards. It would empower people by solving what seemed impossible. Beyond that, the outcome for the developing world would be to put an ultra low cost multitrack recorder into the hands of people who otherwise may never be able to afford it - a sub $150 professional grade multitrack recorder. Who knows what people would invent with it!

I put together a plan and began hacking from paradigm to paradigm, making small incremental steps. It felt like I was building a rocket going to the moon for the first time! No one ever thinks of low level hardware/software hacking as sexy, so it never makes the news.

After months and months of struggling, first with purely analogue electronics, ECL logic and all sorts of tricks, I got really close. Sooo close I could sense the signals were going in the right direction - it just needed a little more. But that little more certainly wasn't going to come from analogue electronics alone. I had to change directions.

I decided to learn VHDL and started using FPGAs. On the weekends, whilst guarding my daughter's infant sleep cycles, I would read Free Range VHDL on my phone or laptop, gradually grasping this non-procedural way of "the hardware". Finally, after months, at the digital hardware level, I was able to get 8 channels of audio into and out from the Pi hat. Eureka! It is incredibly reliable. So functionally attractive and reliable that it had to be let out of the box! And so it began ....

Matt

p.s. It is arguable that $150 is still well out of reach for a lot of people in developing nations. |
interface HttpRequestOptions {
method?: string;
url: string;
referer?: string;
resolveWithFullResponse?: boolean;
form?: any;
body?: any;
}
interface GraphQLRequestOptions {
origin?: string;
referer?: string;
query: string;
variables?: object;
}
interface Credit {
session?: string;
csrfToken: string;
}
declare enum ProblemStatus {
"Accept" = 0,
"Not Accept" = 1,
"Not Start" = 2
}
declare enum ProblemDifficulty {
"Easy" = 0,
"Medium" = 1,
"Hard" = 2
}
declare enum SubmissionStatus {
"Accepted" = 0,
"Compile Error" = 1,
"Wrong Answer" = 2,
"Time Limit Exceeded" = 3
}
declare enum EndPoint {
"US" = 0,
"CN" = 1
}
interface Uris {
base: string;
login: string;
graphql: string;
problemsAll: string;
problem: string;
submit: string;
submission: string;
}
export { HttpRequestOptions, GraphQLRequestOptions, Credit, ProblemStatus, ProblemDifficulty, SubmissionStatus, EndPoint, Uris, };
|
#include <iostream>
#include <functional>
#include <cmath>
#include <string>
#include <vector>
#include "cpp-formula.h"
using namespace std;
using namespace fizvlad;
int main()
{
string str = "";
Formula::Action a_brackets(1, R"RAW_LITERAL(\(([^\(\)]+)\))RAW_LITERAL");
Formula::Action a_power(2, R"RAW_LITERAL(([^\s+*\/\-]+)\s*\^\s*([^\s+*\/\-]+))RAW_LITERAL");
Formula::Action a_ln(1, R"RAW_LITERAL(ln\s*([^\s+*\/\-]+))RAW_LITERAL");
Formula::Action a_multiply(2, R"RAW_LITERAL(([^\s+*\/\-]+)\s*\*\s*([^\s+*\/\-]+))RAW_LITERAL");
Formula::Action a_divide(2, R"RAW_LITERAL(([^\s+*\/\-]+)\s*\/\s*([^\s+*\/\-]+))RAW_LITERAL");
Formula::Action a_plus(2, R"RAW_LITERAL(([^\s+*\/\-]+)\s*\+\s*([^\s+*\/\-]+))RAW_LITERAL");
Formula::Action a_minus(2, R"RAW_LITERAL(([^\s+*\/\-]+)\s*\-\s*([^\s+*\/\-]+))RAW_LITERAL");
Formula::Actions actions = {a_brackets, a_power, a_ln, a_multiply, a_divide, a_plus, a_minus};
Formula f(str, actions);
vector<function<float(vector<float>)>> func;
func.push_back([](vector<float> input){
return input[0];
});
func.push_back([](vector<float> input){
return std::pow(input[0], input[1]);
});
func.push_back([](vector<float> input){
return std::log(input[0]);
});
func.push_back([](vector<float> input){
return input[0] * input[1];
});
func.push_back([](vector<float> input){
return input[0] / input[1];
});
func.push_back([](vector<float> input){
return input[0] + input[1];
});
func.push_back([](vector<float> input){
return input[0] - input[1];
});
function<float(string)> converter = [](string str){
if (str == "E" || str == "e") {
return (float) M_E;
} else if (str == "PI" || str == "pi") {
return (float) M_PI;
}
return stof(str);
};
std::string input;
cout << " < ";
getline(cin, input);
while (input != "") {
f.str = input;
try {
float solution = f.calculate<float>(converter, func);
cout << " > " << solution;
        } catch (const std::runtime_error &e) {
cout << " ! Runtime error occurred: " << e.what();
        } catch (const std::exception &e) {
cout << " ! Error occurred: " << e.what();
}
cout << endl << endl;
cout << " < ";
getline(cin, input);
}
return 0;
}
|
/* C function FCVBBDOPT to access optional outputs from CVBBD_Data */
void FCV_BBDOPT(long int *lenrwbbd, long int *leniwbbd, long int *ngebbd)
{
CVBBDPrecGetWorkSpace(CV_cvodemem, lenrwbbd, leniwbbd);
CVBBDPrecGetNumGfnEvals(CV_cvodemem, ngebbd);
} |
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <memory.h>
#include <string.h>
#include <search.h>
#include <ctype.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "sendip_module.h"
#include <errno.h>
typedef struct _filearray {
unsigned int length;
unsigned int index;
char *lines[0];
} Filearray;
static struct hsearch_data fa_tab;
int
fa_init(void)
{
memset((void *)&fa_tab, 0, sizeof(fa_tab));
#define HSIZE 512 /* why not, probably plenty */
return hcreate_r(HSIZE, &fa_tab);
}
void
fa_close(void)
{
hdestroy_r(&fa_tab);
memset((void *)&fa_tab, 0, sizeof(fa_tab));
}
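/* Read every line of the named file into a newly allocated Filearray.
 * Returns NULL if the file cannot be opened or stat'ed, or if the array
 * cannot be allocated.
 */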
Filearray *
fa_create(const char *name)
{
FILE *fp;
Filearray *answer;
int linelimit=0;
struct stat statbuf;
char line[BUFSIZ];
if (!(fp = fopen(name, "r")))
return NULL;
/* guess a line count based on size */
	if (stat(name, &statbuf) < 0) {
		fclose(fp);
		return NULL;
	}
linelimit = statbuf.st_size/5;
answer = (Filearray *)malloc(sizeof(struct _filearray)+linelimit*(sizeof(char *)));
	if (!answer) { fclose(fp); return NULL; }
/* read the lines into memory */
answer->index=0;
for (answer->length=0; fgets(line, BUFSIZ, fp); ++answer->length) {
if (answer->length >= linelimit-1) {
linelimit *= 2;
answer = (Filearray *)realloc(answer,
sizeof(struct _filearray)+linelimit*(sizeof(char *)));
if (!answer) return NULL;
}
line[strlen(line)-1] = '\0';
answer->lines[answer->length] = strdup(line);
}
fclose(fp);
return answer;
}
/* Find the entry for a given file. If there isn't one,
* create it.
*/
Filearray *
fa_find(const char *name)
{
ENTRY item, *found;
/* Yes, this cast "throws away" the const, but in fact, the
* name is not altered in any way by being entered into the
* hash table. It's just I can't muck with the declaration
* in search.h.
*/
item.key = (char *)name;
item.data = NULL;
if (hsearch_r(item, FIND, &found, &fa_tab) <= 0) {
if (errno == ESRCH || !found) {
item.data = (void *)fa_create(name);
if (!item.data) {
perror(name);
return NULL;
}
if (hsearch_r(item, ENTER, &found, &fa_tab) <= 0) {
perror(name);
return NULL;
}
}
}
return (Filearray *)found->data;
}
/* Takes a file argument, looks it up in the hash table, and
* returns the next line from the associated file.
*/
char *
fileargument(const char *arg)
{
Filearray *fa;
char *answer;
fa = fa_find(arg);
if (!fa) return NULL;
answer = fa->lines[fa->index];
++fa->index;
if (fa->index >= fa->length)
fa->index = 0;
return answer;
}
#ifdef FA_TEST
int main(void)
{
char arg[BUFSIZ];
char *line;
fa_init();
while (1) {
printf("file? ");
fgets(arg, BUFSIZ, stdin);
arg[strlen(arg)-1] = '\0';
if (!arg[0]) break;
		line = fileargument(arg);
if (line) {
printf("%s: %s\n", arg, line);
} else {
printf("%s: not found\n", arg);
}
}
fa_close();
}
#endif
|
#include <iostream>
using namespace std;
int n,i,k,m[3666777],j,x,z;
string s;
int main(){
cin>>n;
for (i=0; i<n; i++){
cin>>k>>s;
x=k-11;
for (j=0; j<=x; j++){
if (s[j]=='8') {
cout<<"YES\n"; z++; break;
}
}
        if (z==0)
            cout<<"NO\n";
        z=0;
}
}
|
/************************************************************************************
Copyright (C) 2021 by <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
************************************************************************************/
#pragma once
NS_GAFF
template <class T, class... Args>
void ConstructFunc(T* obj, Args&&... args)
{
Construct(obj, std::forward<Args>(args)...);
}
template <class T, class... Args>
T* FactoryFunc(IAllocator& allocator, Args&&... args)
{
return GAFF_ALLOCT(T, allocator, std::forward<Args>(args)...);
}
// IVar
template <class T, class Allocator>
template <class DataType>
const DataType& ReflectionDefinition<T, Allocator>::IVar::getDataT(const T& object) const
{
using Type = std::remove_reference<DataType>::type;
const auto& other_refl = GAFF_REFLECTION_NAMESPACE::Reflection<Type>::GetInstance();
const auto& refl = getReflection();
GAFF_ASSERT(refl.isEnum() == other_refl.isEnum());
GAFF_ASSERT(&refl.getReflectionDefinition() == &other_refl.getReflectionDefinition());
return *reinterpret_cast<const DataType*>(getData(&object));
}
template <class T, class Allocator>
template <class DataType>
void ReflectionDefinition<T, Allocator>::IVar::setDataT(T& object, const DataType& data)
{
if (isReadOnly()) {
// $TODO: Log error.
return;
}
using Type = std::remove_reference<DataType>::type;
const auto& other_refl = GAFF_REFLECTION_NAMESPACE::Reflection<Type>::GetInstance();
const auto& refl = getReflection();
GAFF_ASSERT(refl.isEnum() == other_refl.isEnum());
GAFF_ASSERT(&refl.getReflectionDefinition() == &other_refl.getReflectionDefinition());
setData(&object, &data);
}
template <class T, class Allocator>
template <class DataType>
void ReflectionDefinition<T, Allocator>::IVar::setDataMoveT(T& object, DataType&& data)
{
if (isReadOnly()) {
// $TODO: Log error.
return;
}
using Type = std::remove_reference<DataType>::type;
const auto& other_refl = GAFF_REFLECTION_NAMESPACE::Reflection<Type>::GetInstance();
const auto& refl = getReflection();
GAFF_ASSERT(refl.isEnum() == other_refl.isEnum());
GAFF_ASSERT(&refl.getReflectionDefinition() == &other_refl.getReflectionDefinition());
setDataMove(&object, &data);
}
template <class T, class Allocator>
template <class DataType>
const DataType& ReflectionDefinition<T, Allocator>::IVar::getElementT(const T& object, int32_t index) const
{
using Type = std::remove_reference<DataType>::type;
const auto& other_refl = GAFF_REFLECTION_NAMESPACE::Reflection<Type>::GetInstance();
const auto& refl = getReflection();
GAFF_ASSERT(refl.isEnum() == other_refl.isEnum());
if constexpr (std::is_enum<Type>::value) {
GAFF_ASSERT(&refl.getEnumReflectionDefinition() == &other_refl.getEnumReflectionDefinition());
} else {
GAFF_ASSERT(&refl.getReflectionDefinition() == &other_refl.getReflectionDefinition());
}
GAFF_ASSERT((isFixedArray() || isVector()) && size(&object) > index);
return *reinterpret_cast<const DataType*>(getElement(&object, index));
}
template <class T, class Allocator>
template <class DataType>
void ReflectionDefinition<T, Allocator>::IVar::setElementT(T& object, int32_t index, const DataType& data)
{
if (isReadOnly()) {
// $TODO: Log error.
return;
}
using Type = std::remove_reference<DataType>::type;
const auto& other_refl = GAFF_REFLECTION_NAMESPACE::Reflection<Type>::GetInstance();
const auto& refl = getReflection();
GAFF_ASSERT(refl.isEnum() == other_refl.isEnum());
if constexpr (std::is_enum<Type>::value) {
GAFF_ASSERT(&refl.getEnumReflectionDefinition() == &other_refl.getEnumReflectionDefinition());
} else {
GAFF_ASSERT(&refl.getReflectionDefinition() == &other_refl.getReflectionDefinition());
}
GAFF_ASSERT((isFixedArray() || isVector()) && size(&object) > index);
setElement(&object, index, &data);
}
template <class T, class Allocator>
template <class DataType>
void ReflectionDefinition<T, Allocator>::IVar::setElementMoveT(T& object, int32_t index, DataType&& data)
{
if (isReadOnly()) {
// $TODO: Log error.
return;
}
using Type = std::remove_reference<DataType>::type;
const auto& other_refl = GAFF_REFLECTION_NAMESPACE::Reflection<Type>::GetInstance();
const auto& refl = getReflection();
GAFF_ASSERT(refl.isEnum() == other_refl.isEnum());
if constexpr (std::is_enum<Type>::value) {
GAFF_ASSERT(&refl.getEnumReflectionDefinition() == &other_refl.getEnumReflectionDefinition());
} else {
GAFF_ASSERT(&refl.getReflectionDefinition() == &other_refl.getReflectionDefinition());
}
GAFF_ASSERT((isFixedArray() || isVector()) && size(&object) > index);
setElementMove(&object, index, &data);
}
// VarPtr
template <class T, class Allocator>
template <class Var>
ReflectionDefinition<T, Allocator>::VarPtr<Var>::VarPtr(Var T::*ptr):
_ptr(ptr)
{
GAFF_ASSERT(ptr);
}
template <class T, class Allocator>
template <class Var>
const IReflection& ReflectionDefinition<T, Allocator>::VarPtr<Var>::getReflection(void) const
{
if constexpr (IsFlags<Var>()) {
return GAFF_REFLECTION_NAMESPACE::Reflection<typename GetFlagsEnum<Var>::Enum>::GetInstance();
} else {
return GAFF_REFLECTION_NAMESPACE::Reflection<Var>::GetInstance();
}
}
template <class T, class Allocator>
template <class Var>
const void* ReflectionDefinition<T, Allocator>::VarPtr<Var>::getData(const void* object) const
{
const T* const obj = reinterpret_cast<const T*>(object);
return &(obj->*_ptr);
}
template <class T, class Allocator>
template <class Var>
void* ReflectionDefinition<T, Allocator>::VarPtr<Var>::getData(void* object)
{
T* const obj = reinterpret_cast<T*>(object);
return &(obj->*_ptr);
}
template <class T, class Allocator>
template <class Var>
void ReflectionDefinition<T, Allocator>::VarPtr<Var>::setData(void* object, const void* data)
{
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr) = *reinterpret_cast<const Var*>(data);
}
template <class T, class Allocator>
template <class Var>
void ReflectionDefinition<T, Allocator>::VarPtr<Var>::setDataMove(void* object, void* data)
{
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr) = std::move(*reinterpret_cast<Var*>(data));
}
template <class T, class Allocator>
template <class Var>
bool ReflectionDefinition<T, Allocator>::VarPtr<Var>::load(const ISerializeReader& reader, T& object)
{
Var* const var = &(object.*_ptr);
if constexpr (IsFlags<Var>()) {
using Enum = typename GetFlagsEnum<Var>::Enum;
// Iterate over all the flags and read values.
const IEnumReflectionDefinition& ref_def = getReflection().getEnumReflectionDefinition();
const int32_t num_entries = ref_def.getNumEntries();
bool success = true;
for (int32_t i = 0; i < num_entries; ++i) {
const HashStringView32<> flag_name = ref_def.getEntryNameFromIndex(i);
const int32_t flag_index = ref_def.getEntryValue(i);
const auto guard = reader.enterElementGuard(flag_name.getBuffer());
if (!reader.isBool() && !reader.isNull()) {
success = false;
continue;
}
const bool value = reader.readBool(false);
var->set(value, static_cast<Enum>(flag_index));
}
return success;
} else {
return GAFF_REFLECTION_NAMESPACE::Reflection<Var>::Load(reader, *var);
}
}
template <class T, class Allocator>
template <class Var>
void ReflectionDefinition<T, Allocator>::VarPtr<Var>::save(ISerializeWriter& writer, const T& object)
{
const Var* const var = &(object.*_ptr);
if constexpr (IsFlags<Var>()) {
using Enum = typename GetFlagsEnum<Var>::Enum;
// Iterate over all the flags and write values.
const IEnumReflectionDefinition& ref_def = getReflection().getEnumReflectionDefinition();
const int32_t num_entries = ref_def.getNumEntries();
for (int32_t i = 0; i < num_entries; ++i) {
const HashStringView32<> flag_name = ref_def.getEntryNameFromIndex(i);
const int32_t flag_index = ref_def.getEntryValue(i);
const bool value = var->testAll(static_cast<Enum>(flag_index));
writer.writeBool(flag_name.getBuffer(), value);
}
} else {
GAFF_REFLECTION_NAMESPACE::Reflection<Var>::Save(writer, *var);
}
}
template <class T, class Allocator>
template <class Var>
bool ReflectionDefinition<T, Allocator>::VarPtr<Var>::isFlags(void) const
{
return IsFlags<Var>();
}
template <class T, class Allocator>
template <class Var>
void ReflectionDefinition<T, Allocator>::VarPtr<Var>::setFlagValue(void* object, int32_t flag_index, bool value)
{
if constexpr (IsFlags<Var>()) {
using Enum = typename GetFlagsEnum<Var>::Enum;
(reinterpret_cast<T*>(object)->*_ptr).set(value, static_cast<Enum>(flag_index));
} else {
GAFF_ASSERT_MSG(false, "Reflection variable is not flags!");
GAFF_REF(object, flag_index, value);
}
}
template <class T, class Allocator>
template <class Var>
bool ReflectionDefinition<T, Allocator>::VarPtr<Var>::getFlagValue(void* object, int32_t flag_index) const
{
if constexpr (IsFlags<Var>()) {
using Enum = typename GetFlagsEnum<Var>::Enum;
return (reinterpret_cast<T*>(object)->*_ptr).testAll(static_cast<Enum>(flag_index));
} else {
GAFF_ASSERT_MSG(false, "Reflection variable is not flags!");
GAFF_REF(object, flag_index);
return false;
}
}
// VarFlagPtr
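// Exposes a single bit of a Flags<Enum> member as a bool variable.
// getData() caches the bit's current value in _cache and returns the cache's
// address, so the returned pointer is only valid until the next getData() call.
// load()/save() intentionally assert: serialization of the whole flag set is
// handled by the owning VarPtr< Flags<Enum> > instead.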
template <class T, class Allocator>
template <class Enum>
ReflectionDefinition<T, Allocator>::VarFlagPtr<Enum>::VarFlagPtr(Flags<Enum> T::*ptr, uint8_t flag_index):
_ptr(ptr), _flag_index(flag_index), _cache(false)
{
GAFF_ASSERT(ptr);
}
template <class T, class Allocator>
template <class Enum>
const IReflection& ReflectionDefinition<T, Allocator>::VarFlagPtr<Enum>::getReflection(void) const
{
return GAFF_REFLECTION_NAMESPACE::Reflection<bool>::GetInstance();
}
template <class T, class Allocator>
template <class Enum>
const void* ReflectionDefinition<T, Allocator>::VarFlagPtr<Enum>::getData(const void* object) const
{
return const_cast<VarFlagPtr<Enum>*>(this)->getData(const_cast<void*>(object));
}
template <class T, class Allocator>
template <class Enum>
void* ReflectionDefinition<T, Allocator>::VarFlagPtr<Enum>::getData(void* object)
{
T* const obj = reinterpret_cast<T*>(object);
_cache = (obj->*_ptr).testAll(static_cast<Enum>(_flag_index));
return &_cache;
}
template <class T, class Allocator>
template <class Enum>
void ReflectionDefinition<T, Allocator>::VarFlagPtr<Enum>::setData(void* object, const void* data)
{
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr).set(*reinterpret_cast<const bool*>(data), static_cast<Enum>(_flag_index));
}
template <class T, class Allocator>
template <class Enum>
void ReflectionDefinition<T, Allocator>::VarFlagPtr<Enum>::setDataMove(void* object, void* data)
{
setData(object, data);
}
template <class T, class Allocator>
template <class Enum>
bool ReflectionDefinition<T, Allocator>::VarFlagPtr<Enum>::load(const ISerializeReader& /*reader*/, T& /*object*/)
{
GAFF_ASSERT_MSG(false, "VarFlagPtr::load() should never be called.");
return false;
}
template <class T, class Allocator>
template <class Enum>
void ReflectionDefinition<T, Allocator>::VarFlagPtr<Enum>::save(ISerializeWriter& /*writer*/, const T& /*object*/)
{
GAFF_ASSERT_MSG(false, "VarFlagPtr::save() should never be called.");
}
// VarFuncPtrWithCache
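// Variable backed by a getter/setter pair whose getter returns by value.
// Because there is no persistent storage inside the object to point at,
// getData() invokes the getter and stores the result in _cache, returning the
// cache's address. Either member functions or free functions may be supplied;
// _member_func records which flavor is in use.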
template <class T, class Allocator>
template <class Ret, class Var>
ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::VarFuncPtrWithCache(GetterMemberFunc getter, SetterMemberFunc setter):
_getter_member(getter), _setter_member(setter), _member_func(true)
{
GAFF_ASSERT(getter);
}
template <class T, class Allocator>
template <class Ret, class Var>
ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::VarFuncPtrWithCache(GetterFunc getter, SetterFunc setter):
_getter(getter), _setter(setter), _member_func(false)
{
GAFF_ASSERT(getter);
}
template <class T, class Allocator>
template <class Ret, class Var>
const IReflection& ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::getReflection(void) const
{
return GAFF_REFLECTION_NAMESPACE::Reflection<RetType>::GetInstance();
}
template <class T, class Allocator>
template <class Ret, class Var>
const void* ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::getData(const void* object) const
{
return const_cast<VarFuncPtrWithCache<Ret, Var>*>(this)->getData(const_cast<void*>(object));
}
template <class T, class Allocator>
template <class Ret, class Var>
void* ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::getData(void* object)
{
GAFF_ASSERT(object);
GAFF_ASSERT(_getter);
const T* const obj = reinterpret_cast<const T*>(object);
_cache = callGetter(*obj);
return &_cache;
}
template <class T, class Allocator>
template <class Ret, class Var>
void ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::setData(void* object, const void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(_setter);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
callSetter(*obj, *reinterpret_cast<const VarType*>(data));
}
template <class T, class Allocator>
template <class Ret, class Var>
void ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::setDataMove(void* object, void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(_setter);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
callSetter(*obj, std::move(*reinterpret_cast<VarType*>(data)));
}
template <class T, class Allocator>
template <class Ret, class Var>
bool ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::load(const ISerializeReader& reader, T& object)
{
GAFF_ASSERT(_getter);
GAFF_ASSERT(_setter);
VarType var;
if (!GAFF_REFLECTION_NAMESPACE::Reflection<RetType>::Load(reader, var)) {
return false;
}
callSetter(object, var);
return true;
}
template <class T, class Allocator>
template <class Ret, class Var>
void ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::save(ISerializeWriter& writer, const T& object)
{
GAFF_ASSERT(_getter);
GAFF_REFLECTION_NAMESPACE::Reflection<RetType>::Save(writer, callGetter(object));
}
template <class T, class Allocator>
template <class Ret, class Var>
void ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::callSetter(T& object, Var value) const
{
GAFF_ASSERT(_setter);
return (_member_func) ? (object.*_setter_member)(value) : _setter(object, value);
}
template <class T, class Allocator>
template <class Ret, class Var>
Ret ReflectionDefinition<T, Allocator>::VarFuncPtrWithCache<Ret, Var>::callGetter(const T& object) const
{
GAFF_ASSERT(_getter);
return (_member_func) ? (object.*_getter_member)() : _getter(object);
}
// VarFuncPtr
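// Variable backed by a getter/setter pair whose getter returns a reference or
// pointer to storage owned by the object, so getData() can hand out that
// address directly without caching.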
template <class T, class Allocator>
template <class Ret, class Var>
ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::VarFuncPtr(GetterMemberFunc getter, SetterMemberFunc setter):
_getter_member(getter),
_setter_member(setter),
_member_func(true)
{
GAFF_ASSERT(getter);
}
template <class T, class Allocator>
template <class Ret, class Var>
ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::VarFuncPtr(GetterFunc getter, SetterFunc setter) :
_getter(getter),
_setter(setter),
_member_func(false)
{
GAFF_ASSERT(getter);
}
template <class T, class Allocator>
template <class Ret, class Var>
const IReflection& ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::getReflection(void) const
{
return GAFF_REFLECTION_NAMESPACE::Reflection<RetType>::GetInstance();
}
template <class T, class Allocator>
template <class Ret, class Var>
const void* ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::getData(const void* object) const
{
return const_cast<VarFuncPtr<Ret, Var>*>(this)->getData(const_cast<void*>(object));
}
template <class T, class Allocator>
template <class Ret, class Var>
void* ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::getData(void* object)
{
GAFF_ASSERT(object);
GAFF_ASSERT(_getter);
T* const obj = reinterpret_cast<T*>(object);
if constexpr (std::is_reference<Ret>::value) {
const Ret& val = callGetter(*obj);
RetType* const ptr = const_cast<RetType*>(&val);
return ptr;
} else {
return const_cast<RetType*>(callGetter(*obj));
}
}
template <class T, class Allocator>
template <class Ret, class Var>
void ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::setData(void* object, const void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(_setter);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
callSetter(*obj, *reinterpret_cast<const RetType*>(data));
}
template <class T, class Allocator>
template <class Ret, class Var>
void ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::setDataMove(void* object, void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(_setter);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
callSetter(*obj, std::move(*reinterpret_cast<RetType*>(data)));
}
template <class T, class Allocator>
template <class Ret, class Var>
bool ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::load(const ISerializeReader& reader, T& object)
{
GAFF_ASSERT(_getter);
if constexpr (std::is_reference<Ret>::value) {
RetType& val = const_cast<RetType&>(callGetter(object));
return GAFF_REFLECTION_NAMESPACE::Reflection<RetType>::Load(reader, val);
} else {
RetType* const val = const_cast<RetType*>(callGetter(object));
return GAFF_REFLECTION_NAMESPACE::Reflection<RetType>::Load(reader, *val);
}
}
template <class T, class Allocator>
template <class Ret, class Var>
void ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::save(ISerializeWriter& writer, const T& object)
{
GAFF_ASSERT(_getter);
if constexpr (std::is_reference<Ret>::value) {
const RetType& val = callGetter(object);
GAFF_REFLECTION_NAMESPACE::Reflection<RetType>::Save(writer, val);
} else {
const RetType* const val = callGetter(object);
GAFF_REFLECTION_NAMESPACE::Reflection<RetType>::Save(writer, *val);
}
}
template <class T, class Allocator>
template <class Ret, class Var>
void ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::callSetter(T& object, Var value) const
{
GAFF_ASSERT(_setter);
return (_member_func) ? (object.*_setter_member)(value) : _setter(object, value);
}
template <class T, class Allocator>
template <class Ret, class Var>
Ret ReflectionDefinition<T, Allocator>::VarFuncPtr<Ret, Var>::callGetter(const T& object) const
{
GAFF_ASSERT(_getter);
return (_member_func) ? (object.*_getter_member)() : _getter(object);
}
// BaseVarPtr
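// Adapter that re-exposes an IVar registered on a base class through the
// derived class T. Every call converts the T* to a Base* (adjusting the
// pointer as needed) and forwards to the wrapped base-class variable.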
template <class T, class Allocator>
template <class Base>
ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::BaseVarPtr(typename ReflectionDefinition<Base, Allocator>::IVar* base_var):
_base_var(base_var)
{
}
template <class T, class Allocator>
template <class Base>
const IReflection& ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::getReflection(void) const
{
return _base_var->getReflection();
}
template <class T, class Allocator>
template <class Base>
const void* ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::getData(const void* object) const
{
GAFF_ASSERT(object);
const Base* const obj = reinterpret_cast<const T*>(object);
return _base_var->getData(obj);
}
template <class T, class Allocator>
template <class Base>
void* ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::getData(void* object)
{
GAFF_ASSERT(object);
Base* const obj = reinterpret_cast<T*>(object);
return _base_var->getData(obj);
}
template <class T, class Allocator>
template <class Base>
void ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::setData(void* object, const void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly() || _base_var->isReadOnly()) {
// $TODO: Log error.
return;
}
Base* const obj = reinterpret_cast<T*>(object);
_base_var->setData(obj, data);
}
template <class T, class Allocator>
template <class Base>
void ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::setDataMove(void* object, void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly() || _base_var->isReadOnly()) {
// $TODO: Log error.
return;
}
Base* const obj = reinterpret_cast<T*>(object);
_base_var->setData(obj, data);
}
template <class T, class Allocator>
template <class Base>
bool ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::isFixedArray(void) const
{
return _base_var->isFixedArray();
}
template <class T, class Allocator>
template <class Base>
bool ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::isVector(void) const
{
return _base_var->isVector();
}
template <class T, class Allocator>
template <class Base>
bool ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::isFlags(void) const
{
return _base_var->isFlags();
}
template <class T, class Allocator>
template <class Base>
int32_t ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::size(const void* object) const
{
GAFF_ASSERT(object);
const Base* const obj = reinterpret_cast<const T*>(object);
return _base_var->size(obj);
}
template <class T, class Allocator>
template <class Base>
const void* ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::getElement(const void* object, int32_t index) const
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
const Base* const obj = reinterpret_cast<const T*>(object);
return _base_var->getElement(obj, index);
}
template <class T, class Allocator>
template <class Base>
void* ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::getElement(void* object, int32_t index)
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
Base* const obj = reinterpret_cast<T*>(object);
return _base_var->getElement(obj, index);
}
template <class T, class Allocator>
template <class Base>
void ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::setElement(void* object, int32_t index, const void* data)
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly() || _base_var->isReadOnly()) {
// $TODO: Log error.
return;
}
Base* const obj = reinterpret_cast<T*>(object);
_base_var->setElement(obj, index, data);
}
template <class T, class Allocator>
template <class Base>
void ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::setElementMove(void* object, int32_t index, void* data)
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly() || _base_var->isReadOnly()) {
// $TODO: Log error.
return;
}
Base* const obj = reinterpret_cast<T*>(object);
_base_var->setElementMove(obj, index, data);
}
template <class T, class Allocator>
template <class Base>
void ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::swap(void* object, int32_t index_a, int32_t index_b)
{
GAFF_ASSERT(index_a < size(object));
GAFF_ASSERT(index_b < size(object));
GAFF_ASSERT(object);
if (isReadOnly() || _base_var->isReadOnly()) {
// $TODO: Log error.
return;
}
Base* const obj = reinterpret_cast<T*>(object);
_base_var->swap(obj, index_a, index_b);
}
template <class T, class Allocator>
template <class Base>
void ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::resize(void* object, size_t new_size)
{
GAFF_ASSERT(object);
if (isReadOnly() || _base_var->isReadOnly()) {
// $TODO: Log error.
return;
}
Base* const obj = reinterpret_cast<T*>(object);
_base_var->resize(obj, new_size);
}
template <class T, class Allocator>
template <class Base>
bool ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::load(const ISerializeReader& reader, T& object)
{
return _base_var->load(reader, object);
}
template <class T, class Allocator>
template <class Base>
void ReflectionDefinition<T, Allocator>::BaseVarPtr<Base>::save(ISerializeWriter& writer, const T& object)
{
_base_var->save(writer, object);
}
// ArrayPtr
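// Variable wrapper for a fixed-size C-style array member (Var (T::*)[array_size]).
// The element count is a compile-time constant, so resize() asserts and
// load() expects the serialized array to contain exactly array_size elements.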
template <class T, class Allocator>
template <class Var, size_t array_size>
ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::ArrayPtr(Var (T::*ptr)[array_size]):
_ptr(ptr)
{
GAFF_ASSERT(ptr);
}
template <class T, class Allocator>
template <class Var, size_t array_size>
const IReflection& ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::getReflection(void) const
{
return GAFF_REFLECTION_NAMESPACE::Reflection<Var>::GetInstance();
}
template <class T, class Allocator>
template <class Var, size_t array_size>
const void* ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::getData(const void* object) const
{
GAFF_ASSERT(object);
const T* const obj = reinterpret_cast<const T*>(object);
return (obj->*_ptr);
}
template <class T, class Allocator>
template <class Var, size_t array_size>
void* ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::getData(void* object)
{
GAFF_ASSERT(object);
T* const obj = reinterpret_cast<T*>(object);
return (obj->*_ptr);
}
template <class T, class Allocator>
template <class Var, size_t array_size>
void ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::setData(void* object, const void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
const Var* const vars = reinterpret_cast<const Var*>(data);
T* const obj = reinterpret_cast<T*>(object);
for (int32_t i = 0; i < static_cast<int32_t>(array_size); ++i) {
(obj->*_ptr)[i] = vars[i];
}
}
template <class T, class Allocator>
template <class Var, size_t array_size>
void ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::setDataMove(void* object, void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
Var* const vars = reinterpret_cast<Var*>(data);
T* const obj = reinterpret_cast<T*>(object);
for (int32_t i = 0; i < static_cast<int32_t>(array_size); ++i) {
(obj->*_ptr)[i] = std::move(vars[i]);
}
}
template <class T, class Allocator>
template <class Var, size_t array_size>
const void* ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::getElement(const void* object, int32_t index) const
{
GAFF_ASSERT(index < array_size);
GAFF_ASSERT(object);
const T* const obj = reinterpret_cast<const T*>(object);
return &(obj->*_ptr)[index];
}
template <class T, class Allocator>
template <class Var, size_t array_size>
void* ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::getElement(void* object, int32_t index)
{
GAFF_ASSERT(index < array_size);
GAFF_ASSERT(object);
T* const obj = reinterpret_cast<T*>(object);
return &(obj->*_ptr)[index];
}
template <class T, class Allocator>
template <class Var, size_t array_size>
void ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::setElement(void* object, int32_t index, const void* data)
{
GAFF_ASSERT(index < array_size);
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr)[index] = *reinterpret_cast<const Var*>(data);
}
template <class T, class Allocator>
template <class Var, size_t array_size>
void ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::setElementMove(void* object, int32_t index, void* data)
{
GAFF_ASSERT(index < array_size);
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr)[index] = std::move(*reinterpret_cast<Var*>(data));
}
template <class T, class Allocator>
template <class Var, size_t array_size>
void ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::swap(void* object, int32_t index_a, int32_t index_b)
{
GAFF_ASSERT(index_a < array_size);
GAFF_ASSERT(index_b < array_size);
GAFF_ASSERT(object);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
eastl::swap((obj->*_ptr)[index_a], (obj->*_ptr)[index_b]);
}
template <class T, class Allocator>
template <class Var, size_t array_size>
void ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::resize(void*, size_t)
{
GAFF_ASSERT_MSG(false, "Reflection variable is a fixed size array!");
}
template <class T, class Allocator>
template <class Var, size_t array_size>
bool ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::load(const ISerializeReader& reader, T& object)
{
constexpr int32_t size = static_cast<int32_t>(array_size);
GAFF_ASSERT(reader.size() == size);
bool success = true;
for (int32_t i = 0; i < size; ++i) {
ScopeGuard scope = reader.enterElementGuard(i);
success = success && GAFF_REFLECTION_NAMESPACE::Reflection<Var>::Load(reader, (object.*_ptr)[i]);
}
return success;
}
template <class T, class Allocator>
template <class Var, size_t array_size>
void ReflectionDefinition<T, Allocator>::ArrayPtr<Var, array_size>::save(ISerializeWriter& writer, const T& object)
{
constexpr int32_t size = static_cast<int32_t>(array_size);
writer.startArray(static_cast<uint32_t>(array_size));
for (int32_t i = 0; i < size; ++i) {
GAFF_REFLECTION_NAMESPACE::Reflection<Var>::Save(writer, (object.*_ptr)[i]);
}
writer.endArray();
}
// VectorPtr
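// Variable wrapper for a Vector<Var> member. Unlike ArrayPtr, the container is
// resizable: load() resizes the vector to match the serialized element count
// before reading each element in place.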
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::VectorPtr(Vector<Var, Vec_Allocator> (T::*ptr)):
_ptr(ptr)
{
GAFF_ASSERT(ptr);
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
const IReflection& ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::getReflection(void) const
{
return GAFF_REFLECTION_NAMESPACE::Reflection<Var>::GetInstance();
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
const void* ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::getData(const void* object) const
{
GAFF_ASSERT(object);
const T* const obj = reinterpret_cast<const T*>(object);
return &(obj->*_ptr);
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
void* ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::getData(void* object)
{
GAFF_ASSERT(object);
T* const obj = reinterpret_cast<T*>(object);
return &(obj->*_ptr);
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
void ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::setData(void* object, const void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
const Var* const vars = reinterpret_cast<const Var*>(data);
T* const obj = reinterpret_cast<T*>(object);
int32_t arr_size = size(object);
for (int32_t i = 0; i < arr_size; ++i) {
(obj->*_ptr)[i] = vars[i];
}
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
void ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::setDataMove(void* object, void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
Var* const vars = reinterpret_cast<Var*>(data);
T* const obj = reinterpret_cast<T*>(object);
int32_t arr_size = size(object);
for (int32_t i = 0; i < arr_size; ++i) {
(obj->*_ptr)[i] = std::move(vars[i]);
}
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
int32_t ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::size(const void* object) const
{
GAFF_ASSERT(object);
const T* const obj = reinterpret_cast<const T*>(object);
return static_cast<int32_t>((obj->*_ptr).size());
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
const void* ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::getElement(const void* object, int32_t index) const
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
const T* const obj = reinterpret_cast<const T*>(object);
return &(obj->*_ptr)[index];
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
void* ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::getElement(void* object, int32_t index)
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
T* const obj = reinterpret_cast<T*>(object);
return &(obj->*_ptr)[index];
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
void ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::setElement(void* object, int32_t index, const void* data)
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr)[index] = *reinterpret_cast<const Var*>(data);
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
void ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::setElementMove(void* object, int32_t index, void* data)
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr)[index] = std::move(*reinterpret_cast<Var*>(data));
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
void ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::swap(void* object, int32_t index_a, int32_t index_b)
{
GAFF_ASSERT(index_a < size(object));
GAFF_ASSERT(index_b < size(object));
GAFF_ASSERT(object);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
eastl::swap((obj->*_ptr)[index_a], (obj->*_ptr)[index_b]);
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
void ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::resize(void* object, size_t new_size)
{
GAFF_ASSERT(object);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr).resize(new_size);
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
bool ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::load(const ISerializeReader& reader, T& object)
{
const int32_t size = reader.size();
(object.*_ptr).resize(static_cast<size_t>(size));
bool success = true;
for (int32_t i = 0; i < size; ++i) {
ScopeGuard scope = reader.enterElementGuard(i);
success = success && GAFF_REFLECTION_NAMESPACE::Reflection<Var>::Load(reader, (object.*_ptr)[i]);
}
return success;
}
template <class T, class Allocator>
template <class Var, class Vec_Allocator>
void ReflectionDefinition<T, Allocator>::VectorPtr<Var, Vec_Allocator>::save(ISerializeWriter& writer, const T& object)
{
const int32_t size = static_cast<int32_t>((object.*_ptr).size());
writer.startArray(static_cast<uint32_t>(size));
for (int32_t i = 0; i < size; ++i) {
GAFF_REFLECTION_NAMESPACE::Reflection<Var>::Save(writer, (object.*_ptr)[i]);
}
writer.endArray();
}
// VectorMapPtr
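// Variable wrapper for a VectorMap<Key, Value> member. Elements are addressed
// by integer index for the array-style IReflectionVar accessors, while
// serialization stores each entry as an object with "key" and "value" fields.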
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::VectorMapPtr(VectorMap<Key, Value, VecMap_Allocator> T::* ptr):
_ptr(ptr)
{
GAFF_ASSERT(ptr);
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
const IReflection& ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::getReflection(void) const
{
return GAFF_REFLECTION_NAMESPACE::Reflection<Value>::GetInstance();
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
const IReflection& ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::getReflectionKey(void) const
{
return GAFF_REFLECTION_NAMESPACE::Reflection<Key>::GetInstance();
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
const void* ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::getData(const void* object) const
{
GAFF_ASSERT(object);
const T* const obj = reinterpret_cast<const T*>(object);
return &(obj->*_ptr);
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
void* ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::getData(void* object)
{
GAFF_ASSERT(object);
T* const obj = reinterpret_cast<T*>(object);
return &(obj->*_ptr);
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
void ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::setData(void* object, const void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
const eastl::pair<Key, Value>* const vars = reinterpret_cast<const eastl::pair<Key, Value>*>(data);
T* const obj = reinterpret_cast<T*>(object);
int32_t arr_size = size(object);
for (int32_t i = 0; i < arr_size; ++i) {
(obj->*_ptr)[vars[i].first] = vars[i].second;
}
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
void ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::setDataMove(void* object, void* data)
{
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
eastl::pair<Key, Value>* const vars = reinterpret_cast<eastl::pair<Key, Value>*>(data);
T* const obj = reinterpret_cast<T*>(object);
int32_t arr_size = size(object);
for (int32_t i = 0; i < arr_size; ++i) {
(obj->*_ptr)[std::move(vars[i].first)] = std::move(vars[i].second);
}
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
int32_t ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::size(const void* object) const
{
GAFF_ASSERT(object);
const T* const obj = reinterpret_cast<const T*>(object);
return static_cast<int32_t>((obj->*_ptr).size());
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
const void* ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::getElement(const void* object, int32_t index) const
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
const T* const obj = reinterpret_cast<const T*>(object);
return &(obj->*_ptr).at(index);
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
void* ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::getElement(void* object, int32_t index)
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
T* const obj = reinterpret_cast<T*>(object);
return &(obj->*_ptr).at(index);
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
void ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::setElement(void* object, int32_t index, const void* data)
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr).at(index).second = *reinterpret_cast<const Value*>(data);
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
void ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::setElementMove(void* object, int32_t index, void* data)
{
GAFF_ASSERT(index < size(object));
GAFF_ASSERT(object);
GAFF_ASSERT(data);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr).at(index).second = std::move(*reinterpret_cast<Value*>(data));
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
void ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::swap(void* object, int32_t index_a, int32_t index_b)
{
GAFF_ASSERT(index_a < size(object));
GAFF_ASSERT(index_b < size(object));
GAFF_ASSERT(object);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
eastl::swap((obj->*_ptr).at(index_a).second, (obj->*_ptr).at(index_b).second);
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
void ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::resize(void* object, size_t new_size)
{
GAFF_ASSERT(object);
if (isReadOnly()) {
// $TODO: Log error.
return;
}
T* const obj = reinterpret_cast<T*>(object);
(obj->*_ptr).resize(new_size);
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
bool ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::load(const ISerializeReader& reader, T& object)
{
const int32_t size = reader.size();
(object.*_ptr).reserve(static_cast<size_t>(size));
bool success = true;
for (int32_t i = 0; i < size; ++i) {
ScopeGuard scope = reader.enterElementGuard(i);
bool key_loaded = true;
Key key;
{
ScopeGuard guard_key = reader.enterElementGuard("key");
key_loaded = GAFF_REFLECTION_NAMESPACE::Reflection<Key>::Load(reader, key);
success = success && key_loaded;
}
if (key_loaded) {
ScopeGuard guard_value = reader.enterElementGuard("value");
success = success && GAFF_REFLECTION_NAMESPACE::Reflection<Value>::Load(reader, (object.*_ptr)[key]);
}
}
return success;
}
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator>
void ReflectionDefinition<T, Allocator>::VectorMapPtr<Key, Value, VecMap_Allocator>::save(ISerializeWriter& writer, const T& object)
{
const int32_t size = static_cast<int32_t>((object.*_ptr).size());
writer.startArray(static_cast<uint32_t>(size));
for (int32_t i = 0; i < size; ++i) {
writer.startObject(2);
writer.writeKey("key");
GAFF_REFLECTION_NAMESPACE::Reflection<Key>::Save(writer, (object.*_ptr).at(i).first);
writer.writeKey("value");
GAFF_REFLECTION_NAMESPACE::Reflection<Value>::Save(writer, (object.*_ptr).at(i).second);
writer.endObject();
}
writer.endArray();
}
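// Illustrative sketch of the layout save() above produces (values are
// hypothetical; the concrete text depends on the ISerializeWriter in use):
//
//   [
//     { "key": "first_key", "value": 1 },
//     { "key": "second_key", "value": 2 }
//   ]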
// ReflectionDefinition
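// Builder/implementation for the reflection definition of T. A minimal usage
// sketch is shown below; 'Player', 'Entity', and '_health' are hypothetical
// names used purely for illustration:
//
//   ReflectionDefinition<Player, Allocator>& builder = ...;
//   builder.friendlyName("Player")
//       .base<Entity>()
//       .ctor<>()
//       .var("health", &Player::_health);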
template <class T, class Allocator>
template <class... Args>
T* ReflectionDefinition<T, Allocator>::create(Args&&... args) const
{
return createT<T>(_allocator, std::forward<Args>(args)...);
}
template <class T, class Allocator>
const char* ReflectionDefinition<T, Allocator>::getFriendlyName(void) const
{
return _friendly_name.data();
}
template <class T, class Allocator>
bool ReflectionDefinition<T, Allocator>::load(const ISerializeReader& reader, void* object, bool refl_load) const
{
return load(reader, *reinterpret_cast<T*>(object), refl_load);
}
template <class T, class Allocator>
void ReflectionDefinition<T, Allocator>::save(ISerializeWriter& writer, const void* object, bool refl_save) const
{
save(writer, *reinterpret_cast<const T*>(object), refl_save);
}
template <class T, class Allocator>
bool ReflectionDefinition<T, Allocator>::load(const ISerializeReader& reader, T& object, bool refl_load) const
{
if (_serialize_load && !refl_load) {
return _serialize_load(reader, object);
} else {
for (auto& entry : _vars) {
if (entry.second->canSerialize()) {
const char* const name = entry.first.getBuffer();
if (!reader.exists(name)) {
// I don't like this method of determining something as optional.
const auto* const attr = getVarAttr<IAttribute>(FNV1aHash32String(name), FNV1aHash64Const("OptionalAttribute"));
if (!attr) {
// $TODO: Log error.
return false;
}
continue;
}
ScopeGuard scope = reader.enterElementGuard(name);
entry.second->load(reader, object);
}
}
}
return true;
}
template <class T, class Allocator>
void ReflectionDefinition<T, Allocator>::save(ISerializeWriter& writer, const T& object, bool refl_save) const
{
if (_serialize_save && !refl_save) {
_serialize_save(writer, object);
} else {
uint32_t writable_vars = 0;
// Count how many vars we're actually writing to the object.
for (auto& entry : _vars) {
// If not read-only and does not have the NoSerialize attribute.
if (entry.second->canSerialize()) {
++writable_vars;
}
}
// Write out the object.
writer.startObject(writable_vars + 1);
writer.writeUInt64("version", getReflectionInstance().getVersion().getHash());
for (auto& entry : _vars) {
if (entry.second->canSerialize()) {
writer.writeKey(entry.first.getBuffer());
entry.second->save(writer, object);
}
}
writer.endObject();
}
}
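// When no custom serialize-save callback is installed, save() above emits an
// object containing a "version" field followed by one entry per serializable
// variable. Illustrative sketch only; the variable names are hypothetical:
//
//   {
//     "version": 1234567890,
//     "health": 100,
//     "name": "example"
//   }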
template <class T, class Allocator>
Hash64 ReflectionDefinition<T, Allocator>::getInstanceHash(const void* object, Hash64 init) const
{
return getInstanceHash(*reinterpret_cast<const T*>(object), init);
}
template <class T, class Allocator>
Hash64 ReflectionDefinition<T, Allocator>::getInstanceHash(const T& object, Hash64 init) const
{
return (_instance_hash) ? _instance_hash(object, init) : FNV1aHash64T(object, init);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::setInstanceHash(InstanceHashFunc hash_func)
{
_instance_hash = hash_func;
return *this;
}
template <class T, class Allocator>
const void* ReflectionDefinition<T, Allocator>::getInterface(Hash64 class_hash, const void* object) const
{
if (class_hash == GAFF_REFLECTION_NAMESPACE::Reflection<T>::GetHash()) {
return object;
}
auto it = _base_class_offsets.find(class_hash);
if (it == _base_class_offsets.end()) {
return nullptr;
}
return reinterpret_cast<const int8_t*>(object) + it->second;
}
template <class T, class Allocator>
void* ReflectionDefinition<T, Allocator>::getInterface(Hash64 class_hash, void* object) const
{
return const_cast<void*>(getInterface(class_hash, const_cast<const void*>(object)));
}
template <class T, class Allocator>
bool ReflectionDefinition<T, Allocator>::hasInterface(Hash64 class_hash) const
{
if (class_hash == GAFF_REFLECTION_NAMESPACE::Reflection<T>::GetHash()) {
return true;
}
auto it = _base_class_offsets.find(class_hash);
return it != _base_class_offsets.end();
}
template <class T, class Allocator>
void ReflectionDefinition<T, Allocator>::setAllocator(const Allocator& allocator)
{
_base_class_offsets.set_allocator(allocator);
_factories.set_allocator(allocator);
_ctors.set_allocator(allocator);
_vars.set_allocator(allocator);
_funcs.set_allocator(allocator);
_static_funcs.set_allocator(allocator);
_var_attrs.set_allocator(allocator);
_func_attrs.set_allocator(allocator);
_class_attrs.set_allocator(allocator);
_static_func_attrs.set_allocator(allocator);
_friendly_name.set_allocator(allocator);
_allocator = allocator;
}
template <class T, class Allocator>
IAllocator& ReflectionDefinition<T, Allocator>::getAllocator(void)
{
return _allocator;
}
template <class T, class Allocator>
const IReflection& ReflectionDefinition<T, Allocator>::getReflectionInstance(void) const
{
return GAFF_REFLECTION_NAMESPACE::Reflection<T>::GetInstance();
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::size(void) const
{
return sizeof(T);
}
template <class T, class Allocator>
bool ReflectionDefinition<T, Allocator>::isPolymorphic(void) const
{
return std::is_polymorphic<T>::value;
}
template <class T, class Allocator>
bool ReflectionDefinition<T, Allocator>::isBuiltIn(void) const
{
return false;
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getNumVars(void) const
{
return static_cast<int32_t>(_vars.size());
}
template <class T, class Allocator>
HashStringView32<> ReflectionDefinition<T, Allocator>::getVarName(int32_t index) const
{
GAFF_ASSERT(index < static_cast<int32_t>(_vars.size()));
return HashStringView32<>((_vars.begin() + index)->first);
}
template <class T, class Allocator>
IReflectionVar* ReflectionDefinition<T, Allocator>::getVar(int32_t index) const
{
GAFF_ASSERT(index < static_cast<int32_t>(_vars.size()));
return getVarT(index);
}
template <class T, class Allocator>
IReflectionVar* ReflectionDefinition<T, Allocator>::getVar(Hash32 name) const
{
return getVarT(name);
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getNumFuncs(void) const
{
return static_cast<int32_t>(_funcs.size());
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getNumFuncOverrides(int32_t index) const
{
GAFF_ASSERT(index < static_cast<int32_t>(_funcs.size()));
int32_t count = 0;
for (const IRefFuncPtr& func : (_funcs.begin() + index)->second.func) {
if (!func) {
break;
}
++count;
}
return count;
}
template <class T, class Allocator>
HashStringView32<> ReflectionDefinition<T, Allocator>::getFuncName(int32_t index) const
{
GAFF_ASSERT(index < static_cast<int32_t>(_funcs.size()));
return HashStringView32<>((_funcs.begin() + index)->first);
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getFuncIndex(Hash32 name) const
{
const auto it = _funcs.find(name);
return (it == _funcs.end()) ? -1 : static_cast<int32_t>(eastl::distance(_funcs.begin(), it));
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getNumStaticFuncs(void) const
{
return static_cast<int32_t>(_static_funcs.size());
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getNumStaticFuncOverrides(int32_t index) const
{
GAFF_ASSERT(index < static_cast<int32_t>(_static_funcs.size()));
int32_t count = 0;
for (const IRefStaticFuncPtr& func : (_static_funcs.begin() + index)->second.func) {
if (!func) {
break;
}
++count;
}
return count;
}
template <class T, class Allocator>
HashStringView32<> ReflectionDefinition<T, Allocator>::getStaticFuncName(int32_t index) const
{
GAFF_ASSERT(index < static_cast<int32_t>(_static_funcs.size()));
return HashStringView32<>((_static_funcs.begin() + index)->first);
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getStaticFuncIndex(Hash32 name) const
{
const auto it = _static_funcs.find(name);
return (it == _static_funcs.end()) ? -1 : static_cast<int32_t>(eastl::distance(_static_funcs.begin(), it));
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getNumClassAttrs(void) const
{
return static_cast<int32_t>(_class_attrs.size());
}
template <class T, class Allocator>
const IAttribute* ReflectionDefinition<T, Allocator>::getClassAttr(Hash64 attr_name) const
{
return getAttribute(_class_attrs, attr_name);
}
template <class T, class Allocator>
const IAttribute* ReflectionDefinition<T, Allocator>::getClassAttr(int32_t index) const
{
GAFF_ASSERT(index < getNumClassAttrs());
return _class_attrs[index].get();
}
template <class T, class Allocator>
bool ReflectionDefinition<T, Allocator>::hasClassAttr(Hash64 attr_name) const
{
return getClassAttr(attr_name) != nullptr;
}
template <class T, class Allocator>
void ReflectionDefinition<T, Allocator>::addClassAttr(IAttribute& attribute)
{
_class_attrs.emplace_back(IAttributePtr(attribute.clone()));
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getNumVarAttrs(Hash32 name) const
{
const auto it = _var_attrs.find(name);
return (it != _var_attrs.end()) ? static_cast<int32_t>(it->second.size()) : 0;
}
template <class T, class Allocator>
const IAttribute* ReflectionDefinition<T, Allocator>::getVarAttr(Hash32 name, Hash64 attr_name) const
{
const auto it = _var_attrs.find(name);
GAFF_ASSERT(it != _var_attrs.end());
return getAttribute(it->second, attr_name);
}
template <class T, class Allocator>
const IAttribute* ReflectionDefinition<T, Allocator>::getVarAttr(Hash32 name, int32_t index) const
{
const auto it = _var_attrs.find(name);
GAFF_ASSERT(it != _var_attrs.end());
GAFF_ASSERT(index < static_cast<int32_t>(it->second.size()));
return it->second[index].get();
}
template <class T, class Allocator>
bool ReflectionDefinition<T, Allocator>::hasVarAttr(Hash64 attr_name) const
{
for (const auto& attrs : _var_attrs) {
if (getAttribute(attrs.second, attr_name) != nullptr) {
return true;
}
}
return false;
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getNumFuncAttrs(Hash64 name_arg_hash) const
{
const auto it = _func_attrs.find(name_arg_hash);
if (it == _func_attrs.end()) {
for (auto it_base = _base_classes.begin(); it_base != _base_classes.end(); ++it_base) {
const int32_t num = it_base->second->getNumFuncAttrs(name_arg_hash);
if (num > 0) {
return num;
}
}
} else {
return static_cast<int32_t>(it->second.size());
}
return 0;
}
template <class T, class Allocator>
const IAttribute* ReflectionDefinition<T, Allocator>::getFuncAttr(Hash64 name_arg_hash, Hash64 attr_name) const
{
const auto it = _func_attrs.find(name_arg_hash);
GAFF_ASSERT(it != _func_attrs.end());
return getAttribute(it->second, attr_name);
}
template <class T, class Allocator>
const IAttribute* ReflectionDefinition<T, Allocator>::getFuncAttr(Hash64 name_arg_hash, int32_t index) const
{
const auto it = _func_attrs.find(name_arg_hash);
if (it == _func_attrs.end()) {
for (auto it_base = _base_classes.begin(); it_base != _base_classes.end(); ++it_base) {
const int32_t num = it_base->second->getNumFuncAttrs(name_arg_hash);
if (num > 0) {
GAFF_ASSERT(index < num);
return it_base->second->getFuncAttr(name_arg_hash, index);
}
}
} else {
GAFF_ASSERT(index < static_cast<int32_t>(it->second.size()));
return it->second[index].get();
}
return nullptr;
}
template <class T, class Allocator>
bool ReflectionDefinition<T, Allocator>::hasFuncAttr(Hash64 attr_name) const
{
for (const auto& attrs : _func_attrs) {
if (getFuncAttr(attrs.first, attr_name) != nullptr) {
return true;
}
}
return false;
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getNumStaticFuncAttrs(Hash64 name_arg_hash) const
{
const auto it = _static_func_attrs.find(name_arg_hash);
return (it != _static_func_attrs.end()) ? static_cast<int32_t>(it->second.size()) : 0;
}
template <class T, class Allocator>
const IAttribute* ReflectionDefinition<T, Allocator>::getStaticFuncAttr(Hash64 name_arg_hash, Hash64 attr_name) const
{
const auto it = _static_func_attrs.find(name_arg_hash);
GAFF_ASSERT(it != _static_func_attrs.end());
return getAttribute(it->second, attr_name);
}
template <class T, class Allocator>
const IAttribute* ReflectionDefinition<T, Allocator>::getStaticFuncAttr(Hash64 name_arg_hash, int32_t index) const
{
const auto it = _static_func_attrs.find(name_arg_hash);
GAFF_ASSERT(it != _static_func_attrs.end());
GAFF_ASSERT(index < static_cast<int32_t>(it->second.size()));
return it->second[index].get();
}
template <class T, class Allocator>
bool ReflectionDefinition<T, Allocator>::hasStaticFuncAttr(Hash64 attr_name) const
{
for (const auto& attrs : _static_func_attrs) {
if (getStaticFuncAttr(attrs.first, attr_name) != nullptr) {
return true;
}
}
return false;
}
template <class T, class Allocator>
int32_t ReflectionDefinition<T, Allocator>::getNumConstructors(void) const
{
return static_cast<int32_t>(_ctors.size());
}
template <class T, class Allocator>
IReflectionStaticFunctionBase* ReflectionDefinition<T, Allocator>::getConstructor(int32_t index) const
{
GAFF_ASSERT(index < static_cast<int32_t>(_ctors.size()));
return ((_ctors.begin()) + index)->second.get();
}
template <class T, class Allocator>
IReflectionDefinition::VoidFunc ReflectionDefinition<T, Allocator>::getConstructor(Hash64 ctor_hash) const
{
const auto it = _ctors.find(ctor_hash);
return it == _ctors.end() ? nullptr : it->second->getFunc();
}
template <class T, class Allocator>
IReflectionDefinition::VoidFunc ReflectionDefinition<T, Allocator>::getFactory(Hash64 ctor_hash) const
{
const auto it = _factories.find(ctor_hash);
return it == _factories.end() ? nullptr : it->second;
}
template <class T, class Allocator>
IReflectionStaticFunctionBase* ReflectionDefinition<T, Allocator>::getStaticFunc(int32_t name_index, int32_t override_index) const
{
GAFF_ASSERT(name_index < static_cast<int32_t>(_static_funcs.size()));
GAFF_ASSERT(override_index < StaticFuncData::k_num_overloads);
return (_static_funcs.begin() + name_index)->second.func[override_index].get();
}
template <class T, class Allocator>
IReflectionStaticFunctionBase* ReflectionDefinition<T, Allocator>::getStaticFunc(Hash32 name, Hash64 args) const
{
const auto it = _static_funcs.find(name);
if (it != _static_funcs.end()) {
for (int32_t i = 0; i < StaticFuncData::k_num_overloads; ++i) {
if (it->second.hash[i] == args) {
return it->second.func[i].get();
}
}
}
return nullptr;
}
template <class T, class Allocator>
IReflectionFunctionBase* ReflectionDefinition<T, Allocator>::getFunc(int32_t name_index, int32_t override_index) const
{
GAFF_ASSERT(name_index < static_cast<int32_t>(_funcs.size()));
GAFF_ASSERT(override_index < FuncData::k_num_overloads);
return (_funcs.begin() + name_index)->second.func[override_index].get();
}
template <class T, class Allocator>
IReflectionFunctionBase* ReflectionDefinition<T, Allocator>::getFunc(Hash32 name, Hash64 args) const
{
const auto it = _funcs.find(name);
if (it != _funcs.end()) {
for (int32_t i = 0; i < FuncData::k_num_overloads; ++i) {
if (it->second.hash[i] == args) {
return it->second.func[i].get();
}
}
}
return nullptr;
}
template <class T, class Allocator>
void ReflectionDefinition<T, Allocator>::destroyInstance(void* data) const
{
T* const instance = reinterpret_cast<T*>(data);
Deconstruct(instance);
}
template <class T, class Allocator>
typename ReflectionDefinition<T, Allocator>::IVar* ReflectionDefinition<T, Allocator>::getVarT(int32_t index) const
{
GAFF_ASSERT(index < static_cast<int32_t>(_vars.size()));
return (_vars.begin() + index)->second.get();
}
template <class T, class Allocator>
typename ReflectionDefinition<T, Allocator>::IVar* ReflectionDefinition<T, Allocator>::getVarT(Hash32 name) const
{
const auto it = _vars.find(name);
return (it == _vars.end()) ? nullptr : it->second.get();
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::friendlyName(const char* name)
{
_friendly_name = name;
return *this;
}
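// base() registers Base as a reflected base class of T. The named overload only
// records the pointer offset; the templated overload below additionally copies
// the base class's variables, functions, and inheritable attributes into this
// definition (or defers that work via a callback if Base is not defined yet).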
template <class T, class Allocator>
template <class Base>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::base(const char* name)
{
static_assert(std::is_base_of<Base, T>::value, "Class is not a base class of T.");
const ptrdiff_t offset = OffsetOfClass<T, Base>();
auto pair = std::move(
eastl::make_pair(
HashString64<Allocator>(name, _allocator),
offset
)
);
GAFF_ASSERT(_base_class_offsets.find(pair.first) == _base_class_offsets.end());
_base_class_offsets.insert(std::move(pair));
return *this;
}
template <class T, class Allocator>
template <class Base>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::base(void)
{
static_assert(std::is_base_of<Base, T>::value, "Class is not a base class of T.");
// So that hasInterface() calls will properly report inheritance if the base class hasn't been defined yet.
if (_base_class_offsets.find(GAFF_REFLECTION_NAMESPACE::Reflection<Base>::GetHash()) == _base_class_offsets.end()) {
base<Base>(GAFF_REFLECTION_NAMESPACE::Reflection<Base>::GetName());
}
// Add vars, funcs, and static funcs and attrs from base class.
if (GAFF_REFLECTION_NAMESPACE::Reflection<Base>::IsDefined()) {
const ReflectionDefinition<Base, Allocator>& base_ref_def = GAFF_REFLECTION_NAMESPACE::Reflection<Base>::GetReflectionDefinition();
// For calling base class functions.
_base_classes.emplace(
GAFF_REFLECTION_NAMESPACE::Reflection<Base>::GetHash(),
&base_ref_def
);
// Base class vars
for (auto& it : base_ref_def._vars) {
GAFF_ASSERT(_vars.find(it.first) == _vars.end());
eastl::pair<HashString32<Allocator>, IVarPtr> pair(
it.first,
IVarPtr(GAFF_ALLOCT(BaseVarPtr<Base>, _allocator, it.second.get()))
);
pair.second->setNoSerialize(!it.second->canSerialize());
pair.second->setReadOnly(it.second->isReadOnly());
_vars.insert(std::move(pair));
// Base class var attrs
const auto attr_it = base_ref_def._var_attrs.find(pair.first.getHash());
// Copy attributes
if (attr_it != base_ref_def._var_attrs.end()) {
auto& attrs = _var_attrs[pair.first.getHash()];
attrs.set_allocator(_allocator);
for (const IAttributePtr& attr : attr_it->second) {
if (attr->canInherit()) {
attrs.emplace_back(attr->clone());
}
}
}
}
// Base class funcs
for (auto& it : base_ref_def._funcs) {
FuncData& func_data = _funcs[it.first];
for (int32_t i = 0; i < FuncData::k_num_overloads; ++i) {
if (!it.second.func[i]) {
break;
}
int32_t index = -1;
for (int32_t j = 0; j < FuncData::k_num_overloads; ++j) {
if (!func_data.func[j]) {
index = j;
break;
}
if (it.second.hash[i] == func_data.hash[j]) {
break;
}
--index;
}
if (index < 0) {
GAFF_ASSERT_MSG(index > -(FuncData::k_num_overloads + 1), "Function overloading only supports %i overloads per function name!", FuncData::k_num_overloads);
continue;
}
ReflectionBaseFunction* const ref_func = GAFF_ALLOCT(
ReflectionBaseFunction,
_allocator,
it.second.func[i]->getBaseRefDef(),
it.second.func[i].get()
);
func_data.hash[index] = it.second.hash[i];
func_data.func[index].reset(ref_func);
// Copy attributes, keyed by the base class function's name/arg hash.
const Hash64 attr_hash = FNV1aHash64T(it.second.hash[i], FNV1aHash64T(FNV1aHash32T(it.first.getHash())));
const auto attr_it = base_ref_def._func_attrs.find(attr_hash);
if (attr_it != base_ref_def._func_attrs.end()) {
auto& attrs = _func_attrs[attr_hash];
attrs.set_allocator(_allocator);
for (const IAttributePtr& attr : attr_it->second) {
if (attr->canInherit()) {
attrs.emplace_back(attr->clone());
}
}
}
}
}
// Base class static funcs
for (auto& it : base_ref_def._static_funcs) {
StaticFuncData& static_func_data = _static_funcs[it.first];
for (int32_t i = 0; i < StaticFuncData::k_num_overloads; ++i) {
if (!it.second.func[i]) {
break;
}
int32_t index = -1;
for (int32_t j = 0; j < StaticFuncData::k_num_overloads; ++j) {
if (!static_func_data.func[j]) {
index = j;
break;
}
if (it.second.hash[i] == static_func_data.hash[j]) {
break;
}
--index;
}
if (index < 0) {
GAFF_ASSERT_MSG(index > -(StaticFuncData::k_num_overloads + 1), "Function overloading only supports %i overloads per function name!", StaticFuncData::k_num_overloads);
continue;
}
static_func_data.hash[index] = it.second.hash[i];
static_func_data.func[index].reset(it.second.func[i]->clone(_allocator));
// Copy attributes, keyed by the base class function's name/arg hash.
const Hash64 attr_hash = FNV1aHash64T(it.second.hash[i], FNV1aHash64T(FNV1aHash32T(it.first.getHash())));
const auto attr_it = base_ref_def._static_func_attrs.find(attr_hash);
if (attr_it != base_ref_def._static_func_attrs.end()) {
auto& attrs = _static_func_attrs[attr_hash];
attrs.set_allocator(_allocator);
for (const IAttributePtr& attr : attr_it->second) {
if (attr->canInherit()) {
attrs.emplace_back(attr->clone());
}
}
}
}
}
// Base class class attrs
for (const IAttributePtr& attr : base_ref_def._class_attrs) {
if (attr->canInherit()) {
_class_attrs.emplace_back(attr->clone());
}
}
// Register for callback if base class hasn't been defined yet.
} else {
++_dependents_remaining;
eastl::function<void (void)> cb(&RegisterBaseVariables<Base>);
GAFF_REFLECTION_NAMESPACE::Reflection<Base>::RegisterOnDefinedCallback(std::move(cb));
}
return *this;
}
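// ctor() registers a constructor/factory pair for the given argument list.
// Illustrative sketch (the argument types shown are hypothetical):
//
//   builder.ctor<>();                      // default constructor
//   builder.ctor<const char*, int32_t>();  // T(const char*, int32_t)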
template <class T, class Allocator>
template <class... Args>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::ctor(Hash64 factory_hash)
{
GAFF_ASSERT(!getFactory(factory_hash));
ConstructFuncT<T, Args...> construct_func = Gaff::ConstructFunc<T, Args...>;
FactoryFuncT<T, Args...> factory_func = Gaff::FactoryFunc<T, Args...>;
using ConstructorFunction = StaticFunction<void, T*, Args&&...>;
_ctors[factory_hash].reset(GAFF_ALLOCT(ConstructorFunction, _allocator, construct_func));
_factories.emplace(factory_hash, reinterpret_cast<VoidFunc>(factory_func));
return *this;
}
template <class T, class Allocator>
template <class... Args>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::ctor(void)
{
constexpr Hash64 hash = CalcTemplateHash<Args...>(k_init_hash64);
return ctor<Args...>(hash);
}
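// The var() overloads below register reflected variables: a plain member
// pointer, a Flags<Enum> member (which also registers one hidden, non-serialized
// bool variable per flag entry), or a getter/setter pair.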
template <class T, class Allocator>
template <class Var, size_t name_size, class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::var(const char (&name)[name_size], Var T::*ptr, const Attrs&... attributes)
{
static_assert(!std::is_reference<Var>::value, "Cannot reflect references.");
static_assert(!std::is_pointer<Var>::value, "Cannot reflect pointers.");
static_assert(!std::is_const<Var>::value, "Cannot reflect const values.");
static_assert(GAFF_REFLECTION_NAMESPACE::Reflection<Var>::HasReflection, "Var type is not reflected!");
eastl::pair<HashString32<Allocator>, IVarPtr> pair(
HashString32<Allocator>(name, name_size - 1, _allocator),
IVarPtr(GAFF_ALLOCT(VarPtr<Var>, _allocator, ptr))
);
GAFF_ASSERT(_vars.find(pair.first) == _vars.end());
auto& attrs = _var_attrs[FNV1aHash32Const(name)];
attrs.set_allocator(_allocator);
if constexpr (sizeof...(Attrs) > 0) {
addAttributes(pair.second.get(), ptr, attrs, attributes...);
}
_vars.insert(std::move(pair));
return *this;
}
template <class T, class Allocator>
template <class Enum, size_t name_size, class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::var(const char (&name)[name_size], Flags<Enum> T::*ptr, const Attrs&... attributes)
{
static_assert(std::is_enum<Enum>::value, "Flags does not contain an enum.");
static_assert(GAFF_REFLECTION_NAMESPACE::Reflection<Enum>::HasReflection, "Enum is not reflected!");
eastl::pair<HashString32<Allocator>, IVarPtr> pair(
HashString32<Allocator>(name, name_size - 1, _allocator),
IVarPtr(GAFF_ALLOCT(VarPtr< Flags<Enum> >, _allocator, ptr))
);
GAFF_ASSERT(_vars.find(pair.first) == _vars.end());
auto& attrs = _var_attrs[FNV1aHash32Const(name)];
attrs.set_allocator(_allocator);
if constexpr (sizeof...(Attrs) > 0) {
addAttributes(pair.second.get(), ptr, attrs, attributes...);
}
_vars.insert(std::move(pair));
// For each reflected entry in Enum, add a reflection var for that entry.
const IEnumReflectionDefinition& ref_def = GAFF_REFLECTION_NAMESPACE::Reflection<Enum>::GetReflectionDefinition();
const int32_t num_entries = ref_def.getNumEntries();
for (int32_t i = 0; i < num_entries; ++i) {
const HashStringView32<> flag_name = ref_def.getEntryNameFromIndex(i);
const int32_t flag_index = ref_def.getEntryValue(i);
U8String<Allocator> flag_path(_allocator);
flag_path.append_sprintf("%s/%s", name, flag_name.getBuffer());
eastl::pair<HashString32<Allocator>, IVarPtr> flag_pair(
HashString32<Allocator>(flag_path),
IVarPtr(GAFF_ALLOCT(VarFlagPtr<Enum>, _allocator, ptr, static_cast<uint8_t>(i)))
);
flag_pair.second->setNoSerialize(true);
_vars.insert(std::move(flag_pair));
}
return *this;
}
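// Registers a variable backed by a getter/setter member-function pair. Reference and pointer
// return types are wrapped directly; by-value getters go through a caching wrapper.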
template <class T, class Allocator>
template <class Ret, class Var, size_t name_size, class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::var(const char (&name)[name_size], Ret (T::*getter)(void) const, void (T::*setter)(Var), const Attrs&... attributes)
{
//static_assert(std::is_reference<Ret>::value || std::is_pointer<Ret>::value, "Function version of var() only supports reference and pointer return types!");
using RetNoRef = typename std::remove_reference<Ret>::type;
using RetNoPointer = typename std::remove_pointer<RetNoRef>::type;
using RetNoConst = typename std::remove_const<RetNoPointer>::type;
using VarNoRef = typename std::remove_reference<Var>::type;
using VarNoPointer = typename std::remove_pointer<VarNoRef>::type;
using VarNoConst = typename std::remove_const<VarNoPointer>::type;
static_assert(GAFF_REFLECTION_NAMESPACE::Reflection<RetNoConst>::HasReflection, "Getter return type is not reflected!");
static_assert(GAFF_REFLECTION_NAMESPACE::Reflection<VarNoConst>::HasReflection, "Setter arg type is not reflected!");
eastl::pair<HashString32<Allocator>, IVarPtr> pair;
if constexpr (std::is_reference<Ret>::value || std::is_pointer<Ret>::value) {
using PtrType = VarFuncPtr<Ret, Var>;
pair = eastl::pair<HashString32<Allocator>, IVarPtr>(
HashString32<Allocator>(name, name_size - 1, _allocator),
IVarPtr(GAFF_ALLOCT(PtrType, _allocator, getter, setter))
);
} else {
using PtrType = VarFuncPtrWithCache<Ret, Var>;
pair = eastl::pair<HashString32<Allocator>, IVarPtr>(
HashString32<Allocator>(name, name_size - 1, _allocator),
IVarPtr(GAFF_ALLOCT(PtrType, _allocator, getter, setter))
);
}
GAFF_ASSERT(_vars.find(pair.first) == _vars.end());
auto& attrs = _var_attrs[FNV1aHash32Const(name)];
attrs.set_allocator(_allocator);
if constexpr (sizeof...(Attrs) > 0) {
addAttributes(pair.second.get(), getter, setter, attrs, attributes...);
}
_vars.insert(std::move(pair));
return *this;
}
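// Registers a variable backed by free-function getter/setter pairs taking the object explicitly.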
template <class T, class Allocator>
template <class Ret, class Var, size_t name_size, class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::var(const char (&name)[name_size], Ret (*getter)(const T&), void (*setter)(T&, Var), const Attrs&... attributes)
{
//static_assert(std::is_reference<Ret>::value || std::is_pointer<Ret>::value, "Function version of var() only supports reference and pointer return types!");
using RetNoRef = typename std::remove_reference<Ret>::type;
using RetNoPointer = typename std::remove_pointer<RetNoRef>::type;
using RetNoConst = typename std::remove_const<RetNoPointer>::type;
using VarNoRef = typename std::remove_reference<Var>::type;
using VarNoPointer = typename std::remove_pointer<VarNoRef>::type;
using VarNoConst = typename std::remove_const<VarNoPointer>::type;
static_assert(GAFF_REFLECTION_NAMESPACE::Reflection<RetNoConst>::HasReflection, "Getter return type is not reflected!");
static_assert(GAFF_REFLECTION_NAMESPACE::Reflection<VarNoConst>::HasReflection, "Setter arg type is not reflected!");
eastl::pair<HashString32<Allocator>, IVarPtr> pair;
if constexpr (std::is_reference<Ret>::value || std::is_pointer<Ret>::value) {
using PtrType = VarFuncPtr<Ret, Var>;
pair = eastl::pair<HashString32<Allocator>, IVarPtr>(
HashString32<Allocator>(name, name_size - 1, _allocator),
IVarPtr(GAFF_ALLOCT(PtrType, _allocator, getter, setter))
);
} else {
using PtrType = VarFuncPtrWithCache<Ret, Var>;
pair = eastl::pair<HashString32<Allocator>, IVarPtr>(
HashString32<Allocator>(name, name_size - 1, _allocator),
IVarPtr(GAFF_ALLOCT(PtrType, _allocator, getter, setter))
);
}
GAFF_ASSERT(_vars.find(pair.first) == _vars.end());
auto& attrs = _var_attrs[FNV1aHash32Const(name)];
attrs.set_allocator(_allocator);
if constexpr (sizeof...(Attrs) > 0) {
addAttributes(pair.second.get(), getter, setter, attrs, attributes...);
}
_vars.insert(std::move(pair));
return *this;
}
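// Registers a Vector member variable; the element type must be reflected.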
template <class T, class Allocator>
template <class Var, class Vec_Allocator, size_t name_size, class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::var(const char (&name)[name_size], Vector<Var, Vec_Allocator> T::*vec, const Attrs&... attributes)
{
static_assert(!std::is_reference<Var>::value, "Cannot reflect references.");
static_assert(!std::is_pointer<Var>::value, "Cannot reflect pointers.");
static_assert(!std::is_const<Var>::value, "Cannot reflect const values.");
static_assert(GAFF_REFLECTION_NAMESPACE::Reflection<Var>::HasReflection, "Vector data type is not reflected!");
using PtrType = VectorPtr<Var, Vec_Allocator>;
eastl::pair<HashString32<Allocator>, IVarPtr> pair(
HashString32<Allocator>(name, name_size - 1, _allocator),
IVarPtr(GAFF_ALLOCT(PtrType, _allocator, vec))
);
GAFF_ASSERT(_vars.find(pair.first) == _vars.end());
auto& attrs = _var_attrs[FNV1aHash32Const(name)];
attrs.set_allocator(_allocator);
if constexpr (sizeof...(Attrs) > 0) {
addAttributes(pair.second.get(), vec, attrs, attributes...);
}
_vars.insert(std::move(pair));
return *this;
}
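// Registers a fixed-size array member variable; the element type must be reflected.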
template <class T, class Allocator>
template <class Var, size_t array_size, size_t name_size, class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::var(const char (&name)[name_size], Var (T::*arr)[array_size], const Attrs&... attributes)
{
static_assert(!std::is_reference<Var>::value, "Cannot reflect references.");
static_assert(!std::is_pointer<Var>::value, "Cannot reflect pointers.");
static_assert(!std::is_const<Var>::value, "Cannot reflect const values.");
static_assert(GAFF_REFLECTION_NAMESPACE::Reflection<Var>::HasReflection, "Array data type is not reflected!");
using PtrType = ArrayPtr<Var, array_size>;
eastl::pair<HashString32<Allocator>, IVarPtr> pair(
HashString32<Allocator>(name, name_size - 1, _allocator),
IVarPtr(GAFF_ALLOCT(PtrType, _allocator, arr))
);
GAFF_ASSERT(_vars.find(pair.first) == _vars.end());
auto& attrs = _var_attrs[FNV1aHash32Const(name)];
attrs.set_allocator(_allocator);
if constexpr (sizeof...(Attrs) > 0) {
addAttributes(pair.second.get(), arr, attrs, attributes...);
}
_vars.insert(std::move(pair));
return *this;
}
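// Registers a VectorMap member variable; both key and value types must be reflected.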
template <class T, class Allocator>
template <class Key, class Value, class VecMap_Allocator, size_t name_size, class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::var(const char(&name)[name_size], VectorMap<Key, Value, VecMap_Allocator> T::* vec_map, const Attrs&... attributes)
{
static_assert(!std::is_reference<Key>::value, "Cannot reflect references.");
static_assert(!std::is_pointer<Key>::value, "Cannot reflect pointers.");
static_assert(!std::is_const<Key>::value, "Cannot reflect const values.");
static_assert(GAFF_REFLECTION_NAMESPACE::Reflection<Key>::HasReflection, "Key data type is not reflected!");
static_assert(!std::is_reference<Value>::value, "Cannot reflect references.");
static_assert(!std::is_pointer<Value>::value, "Cannot reflect pointers.");
static_assert(!std::is_const<Value>::value, "Cannot reflect const values.");
static_assert(GAFF_REFLECTION_NAMESPACE::Reflection<Value>::HasReflection, "Value data type is not reflected!");
using PtrType = VectorMapPtr<Key, Value, VecMap_Allocator>;
eastl::pair<HashString32<Allocator>, IVarPtr> pair(
HashString32<Allocator>(name, name_size - 1, _allocator),
IVarPtr(GAFF_ALLOCT(PtrType, _allocator, vec_map))
);
GAFF_ASSERT(_vars.find(pair.first) == _vars.end());
auto& attrs = _var_attrs[FNV1aHash32Const(name)];
attrs.set_allocator(_allocator);
if constexpr (sizeof...(Attrs) > 0) {
addAttributes(pair.second.get(), vec_map, attrs, attributes...);
}
_vars.insert(std::move(pair));
return *this;
}
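// Registers a const-qualified member function. Overloads are stored per name, and an entry
// inherited from a base class may be overridden.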
template <class T, class Allocator>
template <size_t name_size, class Ret, class... Args, class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::func(const char (&name)[name_size], Ret (T::*ptr)(Args...) const, const Attrs&... attributes)
{
constexpr Hash64 arg_hash = CalcTemplateHash<Ret, Args...>(k_init_hash64);
auto it = _funcs.find(FNV1aHash32Const(name));
if (it == _funcs.end()) {
ReflectionFunction<true, Ret, Args...>* const ref_func = GAFF_ALLOCT(
GAFF_SINGLE_ARG(ReflectionFunction<true, Ret, Args...>),
_allocator,
ptr
);
eastl::pair<HashString32<Allocator>, FuncData> pair(
HashString32<Allocator>(name, name_size - 1, _allocator),
FuncData()
);
it = _funcs.insert(std::move(pair)).first;
it->second.func[0].reset(ref_func);
it->second.hash[0] = arg_hash;
} else {
FuncData& func_data = it->second;
bool found = false;
for (int32_t i = 0; i < FuncData::k_num_overloads; ++i) {
GAFF_ASSERT(!func_data.func[i] || func_data.hash[i] != arg_hash);
if (!func_data.func[i] || func_data.func[i]->isBase()) {
ReflectionFunction<true, Ret, Args...>* const ref_func = GAFF_ALLOCT(
GAFF_SINGLE_ARG(ReflectionFunction<true, Ret, Args...>),
_allocator,
ptr
);
func_data.func[i].reset(ref_func);
func_data.hash[i] = arg_hash;
found = true;
break;
}
}
GAFF_ASSERT_MSG(found, "Function overloading only supports 8 overloads per function name!");
}
const Hash32 name_hash = FNV1aHash32Const(name);
const Hash64 attr_hash = FNV1aHash64T(arg_hash, FNV1aHash64T(name_hash));
auto& attrs = _func_attrs[attr_hash];
attrs.set_allocator(_allocator);
if constexpr (sizeof...(Attrs) > 0) {
addAttributes(ptr, attrs, attributes...);
}
return *this;
}
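// Registers a non-const member function. Overloads are stored per name, and an entry
// inherited from a base class may be overridden.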
template <class T, class Allocator>
template <size_t name_size, class Ret, class... Args, class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::func(const char (&name)[name_size], Ret (T::*ptr)(Args...), const Attrs&... attributes)
{
constexpr Hash64 arg_hash = CalcTemplateHash<Ret, Args...>(k_init_hash64);
auto it = _funcs.find(FNV1aHash32Const(name));
if (it == _funcs.end()) {
ReflectionFunction<false, Ret, Args...>* const ref_func = GAFF_ALLOCT(
GAFF_SINGLE_ARG(ReflectionFunction<false, Ret, Args...>),
_allocator,
ptr
);
it = _funcs.emplace(
HashString32<Allocator>(name, name_size - 1, _allocator),
FuncData()
).first;
it->second.func[0].reset(ref_func);
it->second.hash[0] = arg_hash;
} else {
FuncData& func_data = it->second;
bool found = false;
for (int32_t i = 0; i < FuncData::k_num_overloads; ++i) {
GAFF_ASSERT(!func_data.func[i] || func_data.hash[i] != arg_hash);
if (!func_data.func[i] || func_data.func[i]->isBase()) {
ReflectionFunction<false, Ret, Args...>* const ref_func = GAFF_ALLOCT(
GAFF_SINGLE_ARG(ReflectionFunction<false, Ret, Args...>),
_allocator,
ptr
);
func_data.func[i].reset(ref_func);
func_data.hash[i] = arg_hash;
found = true;
break;
}
}
GAFF_ASSERT_MSG(found, "Function overloading only supports 8 overloads per function name!");
}
const Hash32 name_hash = FNV1aHash32Const(name);
const Hash64 attr_hash = FNV1aHash64T(arg_hash, FNV1aHash64T(name_hash));
auto& attrs = _func_attrs[attr_hash];
attrs.set_allocator(_allocator);
if constexpr (sizeof...(Attrs) > 0) {
addAttributes(ptr, attrs, attributes...);
}
return *this;
}
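// Registers a static (free) function under the given name. Overloads are distinguished by an
// argument-type hash, and re-registering the same signature replaces the previous entry.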
template <class T, class Allocator>
template <size_t name_size, class Ret, class... Args, class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::staticFunc(const char (&name)[name_size], Ret (*func)(Args...), const Attrs&... attributes)
{
constexpr Hash64 arg_hash = CalcTemplateHash<Ret, Args...>(k_init_hash64);
auto it = _static_funcs.find(FNV1aHash32Const(name));
using StaticFuncType = StaticFunction<Ret, Args...>;
if (it == _static_funcs.end()) {
it = _static_funcs.emplace(
HashString32<Allocator>(name, name_size - 1, _allocator),
StaticFuncData(_allocator)
).first;
it->second.func[0].reset(GAFF_ALLOCT(StaticFuncType, _allocator, func));
it->second.hash[0] = arg_hash;
} else {
StaticFuncData& func_data = it->second;
bool found = false;
for (int32_t i = 0; i < FuncData::k_num_overloads; ++i) {
// Fill an open slot or replace an already existing overload.
if (func_data.func[i] && func_data.hash[i] != arg_hash) {
continue;
}
func_data.func[i].reset(GAFF_ALLOCT(StaticFuncType, _allocator, func));
func_data.hash[i] = arg_hash;
found = true;
break;
}
GAFF_ASSERT_MSG(found, "Function overloading only supports 8 overloads per function name!");
}
const Hash32 name_hash = FNV1aHash32Const(name);
const Hash64 attr_hash = FNV1aHash64T(arg_hash, FNV1aHash64T(name_hash));
auto& attrs = _static_func_attrs[attr_hash];
attrs.set_allocator(_allocator);
if constexpr (sizeof...(Attrs) > 0) {
addAttributes(func, attrs, attributes...);
}
return *this;
}
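// Operator registration helpers. The templated overloads register both (T, Other) and
// (Other, T) argument orderings; the non-templated overloads register the (T, T) form.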
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opAdd(void)
{
staticFunc(OP_ADD_NAME, Add<T, Other>);
return staticFunc(OP_ADD_NAME, Add<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opSub(void)
{
staticFunc(OP_SUB_NAME, Sub<T, Other>);
return staticFunc(OP_SUB_NAME, Sub<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opMul(void)
{
staticFunc(OP_MUL_NAME, Mul<T, Other>);
return staticFunc(OP_MUL_NAME, Mul<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opDiv(void)
{
staticFunc(OP_DIV_NAME, Div<T, Other>);
return staticFunc(OP_DIV_NAME, Div<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opMod(void)
{
staticFunc(OP_MOD_NAME, Mod<T, Other>);
return staticFunc(OP_MOD_NAME, Mod<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitAnd(void)
{
staticFunc(OP_BIT_AND_NAME, BitAnd<T, Other>);
return staticFunc(OP_BIT_AND_NAME, BitAnd<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitOr(void)
{
staticFunc(OP_BIT_OR_NAME, BitOr<T, Other>);
return staticFunc(OP_BIT_OR_NAME, BitOr<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitXor(void)
{
staticFunc(OP_BIT_XOR_NAME, BitXor<T, Other>);
return staticFunc(OP_BIT_XOR_NAME, BitXor<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitShiftLeft(void)
{
staticFunc(OP_BIT_SHIFT_LEFT_NAME, BitShiftLeft<T, Other>);
return staticFunc(OP_BIT_SHIFT_LEFT_NAME, BitShiftLeft<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitShiftRight(void)
{
staticFunc(OP_BIT_SHIFT_RIGHT_NAME, BitShiftRight<T, Other>);
return staticFunc(OP_BIT_SHIFT_RIGHT_NAME, BitShiftRight<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opAnd(void)
{
staticFunc(OP_LOGIC_AND_NAME, LogicAnd<T, Other>);
return staticFunc(OP_LOGIC_AND_NAME, LogicAnd<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opOr(void)
{
staticFunc(OP_LOGIC_OR_NAME, LogicOr<T, Other>);
return staticFunc(OP_LOGIC_OR_NAME, LogicOr<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opEqual(void)
{
staticFunc(OP_EQUAL_NAME, Equal<T, Other>);
return staticFunc(OP_EQUAL_NAME, Equal<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opLessThan(void)
{
staticFunc(OP_LESS_THAN_NAME, LessThan<T, Other>);
return staticFunc(OP_LESS_THAN_NAME, LessThan<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opGreaterThan(void)
{
staticFunc(OP_GREATER_THAN_NAME, GreaterThan<T, Other>);
return staticFunc(OP_GREATER_THAN_NAME, GreaterThan<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opLessThanOrEqual(void)
{
staticFunc(OP_LESS_THAN_OR_EQUAL_NAME, LessThanOrEqual<T, Other>);
return staticFunc(OP_LESS_THAN_OR_EQUAL_NAME, LessThanOrEqual<Other, T>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opGreaterThanOrEqual(void)
{
staticFunc(OP_GREATER_THAN_OR_EQUAL_NAME, GreaterThanOrEqual<T, Other>);
return staticFunc(OP_GREATER_THAN_OR_EQUAL_NAME, GreaterThanOrEqual<Other, T>);
}
template <class T, class Allocator>
template <class... Args>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opCall(void)
{
return staticFunc(OP_CALL_NAME, Call<T, Args...>);
}
template <class T, class Allocator>
template <class Other>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opIndex(void)
{
return staticFunc(OP_INDEX_NAME, Index<T, Other>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opAdd(void)
{
return staticFunc(OP_ADD_NAME, Add<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opSub(void)
{
return staticFunc(OP_SUB_NAME, Sub<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opMul(void)
{
return staticFunc(OP_MUL_NAME, Mul<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opDiv(void)
{
return staticFunc(OP_DIV_NAME, Div<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opMod(void)
{
return staticFunc(OP_MOD_NAME, Mod<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitAnd(void)
{
return staticFunc(OP_BIT_AND_NAME, BitAnd<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitOr(void)
{
return staticFunc(OP_BIT_OR_NAME, BitOr<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitXor(void)
{
return staticFunc(OP_BIT_XOR_NAME, BitXor<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitNot(void)
{
return staticFunc(OP_BIT_NOT_NAME, BitNot<T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitShiftLeft(void)
{
return staticFunc(OP_BIT_SHIFT_LEFT_NAME, BitShiftLeft<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opBitShiftRight(void)
{
return staticFunc(OP_BIT_SHIFT_RIGHT_NAME, BitShiftRight<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opAnd(void)
{
return staticFunc(OP_LOGIC_AND_NAME, LogicAnd<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opOr(void)
{
return staticFunc(OP_LOGIC_OR_NAME, LogicOr<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opEqual(void)
{
return staticFunc(OP_EQUAL_NAME, Equal<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opLessThan(void)
{
return staticFunc(OP_LESS_THAN_NAME, LessThan<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opGreaterThan(void)
{
return staticFunc(OP_GREATER_THAN_NAME, GreaterThan<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opLessThanOrEqual(void)
{
return staticFunc(OP_LESS_THAN_OR_EQUAL_NAME, LessThanOrEqual<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opGreaterThanOrEqual(void)
{
return staticFunc(OP_GREATER_THAN_OR_EQUAL_NAME, GreaterThanOrEqual<T, T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opMinus(void)
{
return staticFunc(OP_MINUS_NAME, Minus<T>);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opPlus(void)
{
return staticFunc(OP_PLUS_NAME, Plus<T>);
}
template <class T, class Allocator>
template <int32_t (*to_string_func)(const T&, char*, int32_t)>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::opToString()
{
staticFunc(OP_TO_STRING_NAME, ToStringHelper<T>::ToString<to_string_func>);
return staticFunc(OP_TO_STRING_NAME, to_string_func);
}
template <class T, class Allocator>
template <class... Attrs>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::classAttrs(const Attrs&... attributes)
{
return addAttributes(_class_attrs, attributes...);
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::version(uint32_t /*version*/)
{
return *this;
}
template <class T, class Allocator>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::serialize(LoadFunc serialize_load, SaveFunc serialize_save)
{
_serialize_load = serialize_load;
_serialize_save = serialize_save;
return *this;
}
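// Completes the definition once all base-class dependencies are resolved: calls finish() on
// class, variable, function, and static-function attributes, then defaults the friendly name.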
template <class T, class Allocator>
void ReflectionDefinition<T, Allocator>::finish(void)
{
if (!_dependents_remaining) {
// Call finish() on attributes first.
for (IAttributePtr& attr : _class_attrs) {
attr->finish(*this);
}
for (auto& it : _var_attrs) {
for (IAttributePtr& attr : it.second) {
attr->finish(*this);
}
}
for (auto& it : _func_attrs) {
for (IAttributePtr& attr : it.second) {
attr->finish(*this);
}
}
for (auto& it : _static_func_attrs) {
for (IAttributePtr& attr : it.second) {
attr->finish(*this);
}
}
if (_friendly_name.empty()) {
_friendly_name = getReflectionInstance().getName();
}
}
}
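// Callback fired when a base class finishes defining its reflection: decrements the dependency
// count, pulls in the base's members via base<Base>(), and finishes this definition when no
// dependencies remain.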
template <class T, class Allocator>
template <class Base>
void ReflectionDefinition<T, Allocator>::RegisterBaseVariables(void)
{
ReflectionDefinition<T, Allocator>& ref_def = const_cast<ReflectionDefinition<T, Allocator>&>(
GAFF_REFLECTION_NAMESPACE::Reflection<T>::GetReflectionDefinition()
);
--ref_def._dependents_remaining;
GAFF_ASSERT(ref_def._dependents_remaining >= 0);
ref_def.base<Base>();
if (ref_def._dependents_remaining == 0) {
ref_def.finish();
}
}
// Variables
template <class T, class Allocator>
template <class Var, class First, class... Rest>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::addAttributes(IReflectionVar* ref_var, Var T::*var, Vector<IAttributePtr, Allocator>& attrs, const First& first, const Rest&... rest)
{
First* const clone = reinterpret_cast<First*>(first.clone());
attrs.emplace_back(IAttributePtr(clone));
clone->apply(*ref_var, var);
if constexpr (sizeof...(Rest) > 0) {
return addAttributes(ref_var, var, attrs, rest...);
} else {
return *this;
}
}
template <class T, class Allocator>
template <class Var, class Ret, class First, class... Rest>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::addAttributes(IReflectionVar* ref_var, Ret (T::*getter)(void) const, void (T::*setter)(Var), Vector<IAttributePtr, Allocator>& attrs, const First& first, const Rest&... rest)
{
First* const clone = reinterpret_cast<First*>(first.clone());
attrs.emplace_back(IAttributePtr(clone));
clone->apply(*ref_var, getter, setter);
if constexpr (sizeof...(Rest) > 0) {
return addAttributes(ref_var, getter, setter, attrs, rest...);
} else {
return *this;
}
}
// Functions
template <class T, class Allocator>
template <class Ret, class... Args, class First, class... Rest>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::addAttributes(Ret (T::*func)(Args...) const, Vector<IAttributePtr, Allocator>& attrs, const First& first, const Rest&... rest)
{
First* const clone = reinterpret_cast<First*>(first.clone());
attrs.emplace_back(IAttributePtr(clone));
clone->apply(func);
if constexpr (sizeof...(Rest) > 0) {
return addAttributes(func, attrs, rest...);
} else {
return *this;
}
}
template <class T, class Allocator>
template <class Ret, class... Args, class First, class... Rest>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::addAttributes(Ret (T::*func)(Args...), Vector<IAttributePtr, Allocator>& attrs, const First& first, const Rest&... rest)
{
First* const clone = reinterpret_cast<First*>(first.clone());
attrs.emplace_back(IAttributePtr(clone));
clone->apply(func);
if constexpr (sizeof...(Rest) > 0) {
return addAttributes(func, attrs, rest...);
} else {
return *this;
}
}
// Static Functions
template <class T, class Allocator>
template <class Ret, class... Args, class First, class... Rest>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::addAttributes(Ret (*func)(Args...), Vector<IAttributePtr, Allocator>& attrs, const First& first, const Rest&... rest)
{
First* const clone = reinterpret_cast<First*>(first.clone());
attrs.emplace_back(IAttributePtr(clone));
clone->apply(func);
if constexpr (sizeof...(Rest) > 0) {
return addAttributes(func, attrs, rest...);
} else {
return *this;
}
}
// Non-apply() call version.
template <class T, class Allocator>
template <class First, class... Rest>
ReflectionDefinition<T, Allocator>& ReflectionDefinition<T, Allocator>::addAttributes(Vector<IAttributePtr, Allocator>& attrs, const First& first, const Rest&... rest)
{
attrs.emplace_back(IAttributePtr(first.clone()));
if constexpr (sizeof...(Rest) > 0) {
return addAttributes(attrs, rest...);
} else {
return *this;
}
}
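// Returns the byte offset from T to the requested base class/interface, or -1 if T does not derive from it.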
template <class T, class Allocator>
ptrdiff_t ReflectionDefinition<T, Allocator>::getBasePointerOffset(Hash64 interface_name) const
{
const auto it = _base_class_offsets.find(interface_name);
return (it != _base_class_offsets.end()) ? it->second : -1;
}
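// Notifies every registered attribute that an instance of T has been constructed.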
template <class T, class Allocator>
void ReflectionDefinition<T, Allocator>::instantiated(void* object) const
{
for (const IAttributePtr& attr : _class_attrs) {
const_cast<IAttributePtr&>(attr)->instantiated(object, *this);
}
for (auto& it : _var_attrs) {
for (const IAttributePtr& attr : it.second) {
const_cast<IAttributePtr&>(attr)->instantiated(object, *this);
}
}
for (auto& it : _func_attrs) {
for (const IAttributePtr& attr : it.second) {
const_cast<IAttributePtr&>(attr)->instantiated(object, *this);
}
}
for (auto& it : _static_func_attrs) {
for (const IAttributePtr& attr : it.second) {
const_cast<IAttributePtr&>(attr)->instantiated(object, *this);
}
}
}
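// Returns the first attribute in the list that implements the requested interface, or nullptr.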
template <class T, class Allocator>
const IAttribute* ReflectionDefinition<T, Allocator>::getAttribute(const AttributeList& attributes, Hash64 attr_name) const
{
for (const auto& attr : attributes) {
if (attr->getReflectionDefinition().hasInterface(attr_name)) {
return attr.get();
}
}
return nullptr;
}
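// A minimal usage sketch of the fluent builder API defined above. All type and member names
// here (Player, IEntity, _health, takeDamage, Create, builder) are hypothetical; builder stands
// for a ReflectionDefinition<Player, Allocator>& obtained through the usual reflection macros.
//
//	builder.base<IEntity>()
//	    .ctor<>()
//	    .var("health", &Player::_health)
//	    .func("takeDamage", &Player::takeDamage)
//	    .staticFunc("Create", &Player::Create);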
NS_END
|
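# Thin SWIG-style wrapper: it simply forwards to the native _BOPTools extension module
# (assumed to be imported elsewhere in this file).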
def IsInvertedSolid(*args):
    return _BOPTools.BOPTools_AlgoTools_IsInvertedSolid(*args) |
// eccAdd adds two points on an elliptic curve over the prime field mod p and returns their sum.
// It assumes the points are given in affine form (x, y) and have different x coordinates.
func eccAdd(point1 [2]*big.Int, point2 [2]*big.Int, p *big.Int) [2]*big.Int {
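	// Slope of the secant line through the two points: m = (y1 - y2) / (x1 - x2) mod p.
	// divMod (defined elsewhere in this package) is assumed to perform modular division,
	// i.e. multiply by the modular inverse of its second argument.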
d1 := big.NewInt(0).Sub(point1[1], point2[1])
d2 := big.NewInt(0).Sub(point1[0], point2[0])
m := divMod(d1, d2, p)
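	// x3 = m^2 - x1 - x2 (mod p)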
x := big.NewInt(0)
x.Sub(big.NewInt(0).Mul(m, m), point1[0])
x.Sub(x, point2[0])
x.Mod(x, p)
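	// y3 = m * (x1 - x3) - y1 (mod p)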
y := big.NewInt(0)
y.Mul(m, big.NewInt(0).Sub(point1[0], x))
y.Sub(y, point1[1])
y.Mod(y, p)
return [2]*big.Int{x, y}
} |
A gun show in Texas in 2016. Photo by Spencer Platt/Getty Images
On Sunday, Americans were reminded that at nearly any time and in nearly any place, there is the possibility that a man—almost always a man—will shoot and kill you. Most mass shootings go relatively unremarked upon; what made Las Vegas so notable was the terrible scale. Fifty-nine dead, 527 injured, a gunman with dozens of weapons and a terrifyingly well-thought-out plan for how to inflict maximum damage on a crowd of strangers. As of Tuesday morning, his motives were still unclear, providing no purchase for any public debate about extremism or mental illness. The only thing that is clear is that he was a man who wanted to kill people—and that guns gave him the ability to do so.
The relevant political question is whether the Republican Party, which currently controls Congress and the White House, has any interest in trying to prevent this sort of thing. Recent history tells us that it does not.
The facts and figures of gun violence are by now familiar, since they enter the public consciousness every time a mass shooting is sufficiently horrific to become national news. Compared to other developed countries, the US has far more guns and far more mass shootings and gun deaths, including homicides, suicides, and accidents. In Nevada, more people die by gunfire than in car accidents. It's also incredibly easy to buy a gun in that state: thanks to the "gun show loophole," you can purchase almost any kind of firearm you want without undergoing a background check. (The Vegas shooter, Stephen Paddock, had reportedly passed background checks anyway.)
No legal regime can prevent every determined madman from killing people, especially someone like Paddock, who apparently didn't have a history of mental illness or violence. But there are reforms that have become blindingly obvious to both experts and regular citizens, making the last several years of congressional inaction and complete surrender to the gun lobby all the more obscene.
The most clear-cut reform on the table is expanding background checks to more gun sales and preventing some people, like violent criminals, the mentally ill, and especially domestic abusers, from buying guns. A poll conducted this year by the New York Times found that more than 80 percent of Americans support those proposals. Mandating that gun owners store their weapons safely and forcing gun buyers to go through a waiting period are also popular and endorsed by experts who study gun violence.
Those measures might reduce the number of gun deaths in the US—for instance, waiting periods could prevent distraught people from buying handguns they might immediately use to commit suicide. But it's hard to see how they would have stopped something like Vegas. That's where proposals banning high-capacity magazines and "bump stocks" that effectively convert semiautomatics into automatics can come into play—those restrictions, advocates say, would at least make it harder for mass shooters to kill dozens of people. (Paddock reportedly had bump stocks among the 23 guns stashed in his hotel room.)
Even simpler than any of that would be supplying money to fund research into gun violence. It remains a shockingly understudied topic, thanks to Congress effectively banning any federal cash from going to research that might lead to the conclusion that more guns mean more people are killed by those guns. That ban makes it harder for emergency rooms to know how to deal with mass shootings.
Any gun control measure will inevitably face court challenges: A high-capacity magazine ban in California is being fought in court right now; last year, the Supreme Court ruled it was constitutional to prohibit domestic abusers from owning guns. But the problem isn't in the courts but in Congress, where gun control is regarded as a political impossibility thanks to the outsized influence of the gun lobby on the Republican Party.
In 2013, after a disturbed shooter murdered 20 children and six adults at a school in Sandy Hook, Connecticut, Democrat Joe Manchin and Republican Pat Toomey, both pro-gun senators, backed a measure to require background checks on gun sales that weren't between friends or family members. It was the most moderate reform you could imagine, and it still failed thanks in large part to opposition from Republicans (a few pro-gun Democrats also voted against it).
Most voters—including many Republicans—may support that sort of policy, but pro-gun voices led by the NRA are extremely loud and incredibly angry. As a result, any kind of rational debate has been blocked. The position of the NRA and its allies is that guns are good and the only solution to gun violence is to have more guns so ordinary citizens can shoot anyone who tries to shoot them. That logic has led to the NRA backing "open carry," making it legal to display weapons in public, a practice opposed by many police departments and once denounced by the NRA itself. (In the wake of the Vegas shooting, the group has gone silent, as it usually does after mass shootings.)
It's a testament to where the gun conversation is right now that many observers suspect the likeliest fallout from Vegas will be the defeat of a pending bill promising to make it easier to buy silencers. That is, the worst mass shooting in modern American history will result in a momentary halt to the rollback of restrictions on who can buy what kind of weapon.
Donald Trump said Tuesday that America will "be talking about gun laws as time goes by." But we already talked about gun laws plenty before Trump even became a candidate, let alone under one of the most pro-gun presidents in history. More talk is not required. The terms of the debate are clear: Congress can attempt to reduce gun violence by making it harder to buy guns, or it can do nothing. If it does nothing, it is tacitly admitting that mass shootings are something Americans should simply have to live with—if they should be so lucky.
Follow Harry Cheadle on Twitter. |