content
stringlengths 10
4.9M
|
---|
def min_moves(r: int, c: int) -> int:
    """Minimum diagonal-step moves to reach (r, c) from the origin.

    Works on absolute coordinates; the answer is symmetric in sign.
    """
    r, c = abs(r), abs(c)
    diff = abs(r - c)
    # 2*min covers the shared diagonal distance; the remaining offset
    # needs diff steps plus (diff - 1) extra zig-zag corrections.
    return 2 * min(r, c) + diff + max(0, diff - 1)


for _ in range(int(input())):
    row, col = map(int, input().split())
    print(min_moves(row, col))
|
Re-using Auxiliary Variables for MaxSAT Preprocessing
Solvers for the maximum satisfiability (MaxSAT) problem -- a well-known optimization variant of Boolean satisfiability (SAT) -- are finding an increasing number of applications. Preprocessing has proven an integral part of the SAT-based approach to efficiently solving various types of real-world problem instances. It was recently shown that SAT preprocessing for MaxSAT becomes more effective by re-using the auxiliary variables introduced in the preprocessing phase directly in the SAT solver within a core-based hybrid MaxSAT solver. We take this idea of re-using auxiliary variables further by identifying them among variables already present in the input MaxSAT instance. Such variables can be re-used already in the preprocessing step, avoiding the introduction of multiple layers of new auxiliary variables in the process. Empirical results show that detecting auxiliary variables in the input MaxSAT instances can lead to modest additional runtime improvements when applied before preprocessing. Furthermore, we show that re-using auxiliary variables not only within preprocessing but also as assumptions within the SAT solver of the MaxHS MaxSAT algorithm can alone lead to performance improvements similar to those observed by applying SAT-based preprocessing.
|
/*
 * Wipes all data from the current object.
 *
 * The backing buffer is zeroised BEFORE any attempt to release it, so the
 * secret bytes never linger in memory even when the runtime cannot delete
 * objects. NOTE(review): assumes PIVSecurityProvider.zeroise overwrites the
 * whole [0, content.length) range — confirm against its implementation.
 */
public void clear() {
    // Nothing allocated yet — nothing to wipe.
    if (content == null) return;
    PIVSecurityProvider.zeroise(content, (short) 0, (short) content.length);
    bytesAllocated = 0;
    // Only drop the reference when the card supports reclaiming the object;
    // otherwise keep the (now zeroed) buffer for reuse.
    if (JCSystem.isObjectDeletionSupported()) {
        content = null;
        JCSystem.requestObjectDeletion();
    }
}
|
<gh_stars>1-10
/*************************************************************************
* Copyright 2021 Gravwell, Inc. All rights reserved.
* Contact: <<EMAIL>>
*
* This software may be modified and distributed under the terms of the
* BSD 2-clause license. See the LICENSE file for details.
**************************************************************************/
package types
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math"
"strconv"
)
var (
	// ErrInvalidGeofence is returned by Geofence.Validate when either corner
	// is outside the legal lat/long range or when the south-west corner lies
	// north of the north-east corner.
	ErrInvalidGeofence = errors.New("Invalid geofence")
)
// PointmapKV is a single key/value metadata pair attached to a pointmap entry.
type PointmapKV struct {
	Key   string
	Value string
}

// Location is a geographic coordinate in decimal degrees.
type Location struct {
	Lat  float64
	Long float64
}

// PointmapValue is one plotted point plus its optional metadata.
type PointmapValue struct {
	Loc      Location
	Metadata []PointmapKV `json:",omitempty"`
}

// PointmapRequest is a pointmap renderer request with an optional geofence filter.
type PointmapRequest struct {
	BaseRequest
	Fence Geofence `json:",omitempty"`
}

// PointmapResponse carries the points produced for a PointmapRequest.
type PointmapResponse struct {
	BaseResponse
	Entries []PointmapValue `json:",omitempty"`
}

// HeatmapRequest is a heatmap renderer request with an optional geofence filter.
type HeatmapRequest struct {
	BaseRequest
	Fence Geofence `json:",omitempty"`
}

// HeatmapResponse carries the weighted points produced for a HeatmapRequest.
type HeatmapResponse struct {
	BaseResponse
	Entries []HeatmapValue `json:",omitempty"`
}

// HeatmapValue is a location with an optional weight (Magnitude).
type HeatmapValue struct {
	Location
	Magnitude float64 `json:",omitempty"`
}

// P2PValue is a directed source→destination pair with an optional weight
// and optional per-pair values.
type P2PValue struct {
	Src       Location
	Dst       Location
	Magnitude float64  `json:",omitempty"`
	Values    []string `json:",omitempty"`
}

// P2PRequest is a point-to-point renderer request with an optional geofence filter.
type P2PRequest struct {
	BaseRequest
	Fence Geofence `json:",omitempty"`
}

// P2PResponse carries the pairs produced for a P2PRequest.
type P2PResponse struct {
	BaseResponse
	ValueNames []string
	Entries    []P2PValue `json:",omitempty"`
}

// Geofence is a bounding box delimited by its south-west and north-east
// corners. The unexported enabled flag is set by Validate; a zero-value
// fence stays disabled and accepts every location.
type Geofence struct {
	SouthWest Location `json:",omitempty"`
	NorthEast Location `json:",omitempty"`
	enabled   bool
}
// IsEmpty reports whether either half of the key/value pair is missing.
func (pkv PointmapKV) IsEmpty() bool {
	return pkv.Key == "" || pkv.Value == ""
}
// CrossesAntimeridian reports whether the fence wraps across the ±180°
// meridian, i.e. its western edge lies numerically east of its eastern edge.
func (gf *Geofence) CrossesAntimeridian() bool {
	west, east := gf.SouthWest.Long, gf.NorthEast.Long
	return west > east
}
// Validate checks the fence corners and arms the fence on success.
// A completely zero fence is treated as "no fence": it is left disabled
// and nil is returned.
func (gf *Geofence) Validate() error {
	sw, ne := gf.SouthWest, gf.NorthEast
	var zero Location
	if sw == zero && ne == zero {
		return nil
	}
	switch {
	case !sw.Valid(), !ne.Valid():
		// Corner outside the legal lat/long ranges.
		return ErrInvalidGeofence
	case sw.Lat > ne.Lat:
		// "SouthWest" must never sit north of "NorthEast".
		return ErrInvalidGeofence
	}
	// Longitude is deliberately unconstrained here: SouthWest.Long greater
	// than NorthEast.Long simply means the fence crosses the antimeridian.
	gf.enabled = true
	return nil
}
// InFence reports whether loc falls inside the fence. Invalid locations are
// always outside; a disabled fence accepts every valid location.
func (gf *Geofence) InFence(loc Location) bool {
	if !loc.Valid() {
		return false
	}
	if !gf.enabled {
		// Disabled fence: everything valid is in.
		return true
	}
	// Latitude band check is identical in both hemispheres.
	if loc.Lat < gf.SouthWest.Lat || loc.Lat > gf.NorthEast.Lat {
		return false
	}
	if gf.CrossesAntimeridian() {
		// Wrapping fence: inside means east of the western edge OR west of
		// the eastern edge.
		return loc.Long >= gf.SouthWest.Long || loc.Long <= gf.NorthEast.Long
	}
	// Normal fence: inside means between the two edges.
	return loc.Long >= gf.SouthWest.Long && loc.Long <= gf.NorthEast.Long
}
// Encode serializes the location as 16 bytes: the IEEE-754 bits of Lat in
// bytes [0,8) and of Long in bytes [8,16), both little-endian.
func (loc Location) Encode() (v []byte) {
	buf := make([]byte, 16)
	binary.LittleEndian.PutUint64(buf[0:8], math.Float64bits(loc.Lat))
	binary.LittleEndian.PutUint64(buf[8:16], math.Float64bits(loc.Long))
	return buf
}
// Decode reads the 16-byte layout written by Encode. It returns false when
// the buffer is too short, leaving loc untouched; extra trailing bytes are
// ignored.
func (loc *Location) Decode(v []byte) bool {
	if len(v) >= 16 {
		loc.Lat = math.Float64frombits(binary.LittleEndian.Uint64(v[0:8]))
		loc.Long = math.Float64frombits(binary.LittleEndian.Uint64(v[8:16]))
		return true
	}
	return false
}
// String renders the location as "lat long", each with six decimal places
// (the same output fmt's %f verb would produce).
func (loc Location) String() string {
	lat := strconv.FormatFloat(loc.Lat, 'f', 6, 64)
	long := strconv.FormatFloat(loc.Long, 'f', 6, 64)
	return lat + " " + long
}
// Valid reports whether the location lies within the legal WGS84 ranges:
// latitude in [-90, 90] and longitude in [-180, 180].
//
// NOTE(review): NaN coordinates fail every comparison below, so a NaN
// location is reported as valid — this mirrors the original comparison
// layout; confirm whether that is intended.
func (loc Location) Valid() bool {
	outOfRange := loc.Lat > 90.0 || loc.Lat < -90.0 ||
		loc.Long > 180.0 || loc.Long < -180.0
	return !outOfRange
}
// MarshalJSON encodes the pair as a single-member JSON object {"key":"value"}.
//
// Both key and value are passed through json.Marshal so quotes, backslashes
// and control characters are escaped correctly. The previous implementation
// interpolated the raw key into the output (invalid JSON for keys containing
// '"' or '\') and quoted the value with strconv.Quote, whose Go escapes
// (\a, \v, \x..) are not legal JSON.
func (pkv PointmapKV) MarshalJSON() (r []byte, err error) {
	var k, v []byte
	if k, err = json.Marshal(pkv.Key); err != nil {
		return nil, err
	}
	if v, err = json.Marshal(pkv.Value); err != nil {
		return nil, err
	}
	r = []byte(fmt.Sprintf("{%s:%s}", k, v))
	return
}
// MarshalJSON encodes the value as the compact array form accepted by
// UnmarshalJSON: [lat, long] when Magnitude is zero, [lat, long, magnitude]
// otherwise.
//
// Encoding through json.Marshal preserves full float64 precision and always
// yields valid JSON; the previous fmt "%f" formatting truncated coordinates
// to six decimal places (breaking Marshal/Unmarshal round-trips) and emitted
// invalid JSON for NaN or infinite values.
func (hv HeatmapValue) MarshalJSON() ([]byte, error) {
	vals := []float64{hv.Lat, hv.Long}
	if hv.Magnitude != 0.0 {
		vals = append(vals, hv.Magnitude)
	}
	return json.Marshal(vals)
}
// UnmarshalJSON decodes a JSON array of two or three numbers into the value:
// [lat, long] or [lat, long, magnitude]. Any other arity is rejected.
func (hv *HeatmapValue) UnmarshalJSON(data []byte) error {
	var coords []float64
	if err := json.Unmarshal(data, &coords); err != nil {
		return err
	}
	switch len(coords) {
	case 2:
		hv.Lat, hv.Long = coords[0], coords[1]
	case 3:
		hv.Lat, hv.Long, hv.Magnitude = coords[0], coords[1], coords[2]
	default:
		return fmt.Errorf("Expected 2 or 3 values, got %v", len(coords))
	}
	return nil
}
|
Plasma and Erythrocyte Vitamin E Levels in Children with Insulin Dependent Diabetes Mellitus
Vitamin E is considered to be one of the most important antioxidants. There is a trend today to supply diabetic children with vitamin E in order to prevent microvascular complications. In this study, our objective was to demonstrate validity of plasma and erythrocyte vitamin E levels in diabetic children. This study was conducted on twenty-five diabetic patients aged from 7-16 years and ten non-diabetic, age-matched healthy subjects as the control group. Vitamin E levels were measured by high-performance liquid chromatography. There was no significant difference between the mean plasma vitamin E levels of diabetic and control groups, 870.80 +/- 220.51 micrograms/dl and 891 +/- 221.21 micrograms/dl, respectively (p > 0.05). The mean erythrocyte vitamin E levels of diabetic and control groups were significantly different: 183.12 +/- 62.58 micrograms/dl and 246.90 +/- 68.26 micrograms/dl, respectively (p < 0.05). Erythrocyte vitamin E levels were significantly lower than plasma vitamin E levels in both groups. We further investigated whether a correlation exists between plasma and erythrocyte vitamin E levels and duration of diabetes, insulin dose and HbA1c measurements. However no correlation was found. In conclusion, measurement of erythrocyte vitamin E levels may be considered to be more valuable than plasma vitamin E levels in diabetic children and supplementation may be provided according to erythrocyte levels rather than plasma levels.
|
<reponame>TecArt/servicecatalog-development
/*
* Copyright 2005 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sf.j2ep.model;
import javax.servlet.http.HttpServletRequest;
/**
 * A server container is an object that can hold a server. It
 * is used to do initial processing of the data in a request
 * before a server is created. This can be used to make some
 * decisions on server creation based on the request to
 * allow more advanced servers.
 *
 * Usually the server can implement both this interface and
 * the server interface since the usage is rather linked.
 *
 * @author <NAME>
 */
public interface ServerContainer {

    /**
     * Do any processing needed before this server can be used.
     * Specifically important for cluster servers that need
     * to choose which server to use for the current request.
     *
     * @return The server that is ready to be used.
     */
    Server getServer(HttpServletRequest request);

    /**
     * Returns the mapped rule. This method must return the same
     * rule as a call to the underlying server's getRule().
     *
     * @return The rule we are mapped to
     * @see Server#getRule()
     */
    Rule getRule();

    /**
     * Sets the rule that is mapped for this server. Will
     * be used when we rewrite links to know how an absolute
     * path should be rewritten.
     *
     * @param rule The rule
     */
    void setRule(Rule rule);

    /**
     * Finds a server with the full path specified by the
     * location sent in.
     *
     * @param link The start of a link that a server is mapped to
     * @return The server that can handle the link, null if no server is found
     */
    Server getServerMapped(String link);
}
|
<reponame>wirrareka/react-fonticonpicker
// Copyright (c) 2018 <NAME> <<EMAIL>>
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
import React from "react";
import FipCategory from "./FipCategory";
import FipSearch from "./FipSearch";
import FipIconContainer from "./FipIconContainer";
import { getPossibleCategories } from "../helpers/iconHelpers";
// Props for the dropdown portion of the icon picker. All selection state is
// owned by the parent; the handleChange* callbacks report interaction upward.
interface IFipDropDownProps extends React.HTMLAttributes<Element> {
	isMulti: boolean;
	// Currently selected icon(s).
	value: number | string | any[];
	currentCategory: number;
	currentPage: number;
	currentSearch: string;
	// Icon source: a flat list, or a map of category name -> icon list.
	icons:
		| number[]
		| string[]
		| {
				[key: string]: number[] | string[]
		  };
	search?: any | string[];
	showCategory: boolean;
	showSearch: boolean;
	iconsPerPage: number;
	// Label used for the synthetic "all categories" entry.
	allCatPlaceholder: string;
	searchPlaceholder: string;
	noIconPlaceholder: string;
	renderIcon: (...args: any[]) => any;
	handleChangeValue: (...args: any[]) => any;
	handleChangeCategory: (...args: any[]) => any;
	handleChangePage: (...args: any[]) => any;
	handleChangeSearch: (...args: any[]) => any;
}

// Internal state, derived from props on every render pass
// (see FipDropDown.getDerivedStateFromProps).
type FipDropDownState = {
	categories?: any,
	searchString?: any
};
/**
 * Dropdown portion of the icon picker: renders the (optional) search box,
 * the (optional) category selector, and the icon grid container.
 *
 * The component is fully controlled — all selection state lives in the
 * parent and flows back in through props.
 */
class FipDropDown extends React.PureComponent<
	IFipDropDownProps,
	FipDropDownState
> {
	static defaultProps = {
		search: null
	};

	// Derive the category list from the icon source on every render pass;
	// the "all categories" placeholder is prepended when categories exist.
	static getDerivedStateFromProps(nextProps) {
		// Get categories and flattened source
		let categories = getPossibleCategories(nextProps.icons);
		if (categories !== null) {
			categories = [nextProps.allCatPlaceholder, ...categories];
		}
		// assign to the state
		// rest is handled by props
		return { categories, searchString: nextProps.currentSearch };
	}

	constructor(props) {
		super(props);
		this.state = {};
	}

	/**
	 * Handle category change.
	 *
	 * Notifies the parent of the new category and resets paging.
	 */
	handleCategory = event => {
		// Get the category; non-numeric values fall back to 0 ("all").
		let currentCategory = parseInt(event.target.value, 10);
		if (Number.isNaN(currentCategory)) {
			currentCategory = 0;
		}
		// call the external handler
		this.props.handleChangeCategory(currentCategory);
		// Also change the pagenumber to 0
		this.props.handleChangePage(0);
	};

	// Relay the search-box value to the parent; the parent feeds it back in
	// through currentSearch (see getDerivedStateFromProps).
	handleSearch = event => {
		const currentSearch = event.target.value;
		// Change the state of the parent
		this.props.handleChangeSearch(currentSearch);
	};

	render() {
		return (
			<div className="rfipdropdown__selector">
				{this.props.showSearch ? (
					<FipSearch
						handleSearch={this.handleSearch}
						value={this.state.searchString}
						placeholder={this.props.searchPlaceholder}
					/>
				) : null}
				{this.props.showCategory &&
				this.state.categories &&
				this.state.categories.length ? (
					<FipCategory
						handleCategory={this.handleCategory}
						value={this.props.currentCategory}
						categories={this.state.categories}
					/>
				) : null}
				<FipIconContainer
					categories={this.state.categories}
					currentCategory={this.props.currentCategory}
					isMulti={this.props.isMulti}
					icons={this.props.icons}
					search={this.props.search}
					value={this.props.value}
					currentSearch={this.props.currentSearch}
					handleChangeValue={this.props.handleChangeValue}
					currentPage={this.props.currentPage}
					iconsPerPage={this.props.iconsPerPage}
					handleChangePage={this.props.handleChangePage}
					renderIcon={this.props.renderIcon}
					noIconPlaceholder={this.props.noIconPlaceholder}
				/>
			</div>
		);
	}
}

export default FipDropDown;
|
def dsn(self):
    """Return connection parameters parsed from the configured DSN env var.

    Looks up the environment variable named by ``self._dsn_env_var`` and
    parses its value as a connection string. Returns ``None`` implicitly
    when no DSN environment variable is configured.
    """
    if self._dsn_env_var:
        return etl.db.parse_connection_string(etl.config.env.get(self._dsn_env_var))
|
import math

# n books, m requests, k books per shelf "page".
n, m, k = map(int, input().split())
books = list(map(int, input().split()))
requests = list(map(int, input().split()))

# pos maps each book id to its current index in the stack.
pos = {book: idx for idx, book in enumerate(books)}

total = 0
for book in requests:
    x = pos[book]
    # Cost of fetching the book at index x (1-based position, k per unit).
    total += math.ceil((x + 1) / k)
    # Move-to-front by one: swap the fetched book with its predecessor.
    if x > 0:
        prev = books[x - 1]
        pos[prev], pos[book] = x, x - 1
        books[x - 1], books[x] = books[x], books[x - 1]
print(total)
|
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Andrew Dornbush
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////////////////
/// \author Andrew Dornbush
#ifndef SMPL_ADAPTIVE_WORKSPACE_LATTICE_H
#define SMPL_ADAPTIVE_WORKSPACE_LATTICE_H
// standard includes
#include <functional>
#include <ostream>
#include <tuple>
#include <vector>
// project includes
#include <smpl/graph/adaptive_graph_extension.h>
#include <smpl/graph/motion_primitive.h>
#include <smpl/graph/workspace_lattice_base.h>
#include <smpl/grid/grid.h>
#include <smpl/occupancy_grid.h>
#include <smpl/time.h>
#include <smpl/types.h>
namespace smpl {
/// Base class for adaptive states. Denotes whether a state is high dimensional.
struct AdaptiveState
{
    bool hid;   ///< true when this state is high-dimensional
};

/// Low-dimensional state: a continuous workspace position (x, y, z) together
/// with its discretized grid cell (gx, gy, gz).
struct AdaptiveGridState : public AdaptiveState
{
    double x;
    double y;
    double z;
    int gx;
    int gy;
    int gz;
};

std::ostream& operator<<(std::ostream& o, const AdaptiveGridState& s);
/// Two grid states are equal when they occupy the same discrete cell; the
/// continuous coordinates are deliberately ignored.
inline
bool operator==(const AdaptiveGridState& a, const AdaptiveGridState& b)
{
    return a.gx == b.gx && a.gy == b.gy && a.gz == b.gz;
}
/// High-dimensional state: a full robot configuration plus its discrete
/// workspace coordinate.
struct AdaptiveWorkspaceState : public AdaptiveState
{
    RobotState state;
    WorkspaceCoord coord;
};

std::ostream& operator<<(std::ostream& o, const AdaptiveWorkspaceState& s);
/// Two workspace states are equal when their discrete workspace coordinates
/// match; the continuous RobotState is deliberately not compared.
inline
bool operator==(
    const AdaptiveWorkspaceState& a,
    const AdaptiveWorkspaceState& b)
{
    return a.coord == b.coord;
}
} // namespace smpl
// std::hash specializations for state types (implementations live in the
// corresponding .cpp); these must hash on the same fields the operator==
// definitions above compare, i.e. the discrete coordinates only.
namespace std {

template <>
struct hash<smpl::AdaptiveGridState>
{
    typedef smpl::AdaptiveGridState argument_type;
    typedef std::size_t result_type;
    result_type operator()(const argument_type& s) const;
};

template <>
struct hash<smpl::AdaptiveWorkspaceState>
{
    typedef smpl::AdaptiveWorkspaceState argument_type;
    typedef std::size_t result_type;
    result_type operator()(const argument_type& s) const;
};

} // namespace std
namespace smpl {
/// Planning graph that mixes low-dimensional (grid) and high-dimensional
/// (full robot) states, growing high-dimensional regions adaptively around
/// interesting states (see addHighDimRegion/setTunnel).
class AdaptiveWorkspaceLattice :
    public WorkspaceLatticeBase,
    public AdaptiveGraphExtension,
    public PointProjectionExtension
{
public:

    ~AdaptiveWorkspaceLattice();

    /// \name Reimplemented Public Functions from WorkspaceLatticeBase
    ///@{
    bool init(
        RobotModel* robot,
        CollisionChecker* checker,
        const Params& params,
        const OccupancyGrid* grid);
    ///@}

    /// \name Required Public Functions from PointProjectionExtension
    ///@{
    bool projectToPoint(int state_id, Vector3& pos) override;
    ///@}

    /// \name Required Public Functions from AdaptiveGraphExtension
    ///@{
    bool addHighDimRegion(int state_id) override;
    bool setTunnel(const std::vector<int>& states) override;
    bool isExecutable(const std::vector<int>& states) const override;
    bool setTrackMode(const std::vector<int>& tunnel) override;
    bool setPlanMode() override;
    ///@}

    /// \name Required Public Functions from RobotPlanningSpace
    ///@{
    int getStartStateID() const override;
    int getGoalStateID() const override;
    bool extractPath(
        const std::vector<int>& ids,
        std::vector<RobotState>& path) override;
    ///@}

    /// \name Reimplemented Functions from RobotPlanningSpace
    ///@{
    bool setStart(const RobotState& state) override;
    bool setGoal(const GoalConstraint& goal) override;
    ///@}

    /// \name Required Public Functions from Extension
    ///@{
    Extension* getExtension(size_t class_code) override;
    ///@}

    /// \name Required Public Functions from DiscreteSpaceInformation
    ///@{
    void GetSuccs(
        int state_id,
        std::vector<int>* succs,
        std::vector<int>* costs) override;
    void GetPreds(
        int state_id,
        std::vector<int>* preds,
        std::vector<int>* costs) override;
    void PrintState(int state_id, bool verbose, FILE* f = nullptr) override;
    ///@}

private:

    const OccupancyGrid* m_grid = nullptr;

    AdaptiveState* m_goal_state = nullptr;
    int m_goal_state_id = -1;
    AdaptiveState* m_start_state = nullptr;
    int m_start_state_id = -1;

    // Separate hash tables map high- and low-dimensional states to ids;
    // both index into the shared m_states array.
    typedef AdaptiveWorkspaceState HiStateKey;
    typedef PointerValueHash<HiStateKey> HiStateHash;
    typedef PointerValueEqual<HiStateKey> HiStateEqual;
    typedef AdaptiveGridState LoStateKey;
    typedef PointerValueHash<LoStateKey> LoStateHash;
    typedef PointerValueEqual<LoStateKey> LoStateEqual;

    hash_map<HiStateKey*, int, HiStateHash, HiStateEqual> m_hi_to_id;
    hash_map<LoStateKey*, int, LoStateHash, LoStateEqual> m_lo_to_id;
    std::vector<AdaptiveState*> m_states;

    clock::time_point m_t_start;
    mutable bool m_near_goal = false;

    // Motion primitives: 3-d deltas for low-dim states, full primitives for
    // high-dim states.
    std::vector<Vector3> m_lo_prims;
    std::vector<MotionPrimitive> m_hi_prims;

    bool m_ik_amp_enabled = true;
    double m_ik_amp_thresh = 0.2;

    int m_region_radius = 1;
    int m_tunnel_radius = 3;

    bool m_plan_mode = true;

    // Per-cell dimensionality flags for plan and track modes.
    struct AdaptiveGridCell
    {
        int grow_count;
        bool plan_hd; // cell is high-dimensional while planning
        bool trak_hd; // cell is high-dimensional while tracking
        AdaptiveGridCell() : grow_count(0), plan_hd(false), trak_hd(false) { }
    };
    Grid3<AdaptiveGridCell> m_dim_grid;

    bool initMotionPrimitives();

    bool setGoalPose(const GoalConstraint& goal);

    void GetSuccs(
        const AdaptiveGridState& state,
        std::vector<int>* succs,
        std::vector<int>* costs);
    void GetSuccs(
        const AdaptiveWorkspaceState& state,
        std::vector<int>* succs,
        std::vector<int>* costs);

    int reserveHashEntry(bool hid);

    bool isHighDimensional(int gx, int gy, int gz) const;

    AdaptiveState* getHashEntry(int state_id) const;
    AdaptiveWorkspaceState* getHiHashEntry(int state_id) const;
    AdaptiveGridState* getLoHashEntry(int state_id) const;

    int getHiHashEntry(const WorkspaceCoord& coord);
    int getLoHashEntry(int x, int y, int z);

    int createHiState(const WorkspaceCoord& coord, const RobotState& state);
    int createLoState(int x, int y, int z, double wx, double wy, double wz);

    void getActions(
        const AdaptiveWorkspaceState& state,
        std::vector<Action>& actions);

    bool checkAction(
        const RobotState& state,
        const Action& action,
        RobotState* final_rstate = nullptr);

    bool isGoal(const WorkspaceState& state) const;
    bool isLoGoal(double x, double y, double z) const;

    auto getStateVisualization(const RobotState& state, const std::string& ns)
        -> std::vector<visual::Marker>;

    auto getAdaptiveGridVisualization(bool plan_mode) const -> visual::Marker;
};
} // namespace smpl
#endif
|
def runFastqScreen(infiles, outfile):
    """Run fastq_screen on the input FASTQ files.

    A temporary fastq_screen configuration file is generated from every
    PARAMS entry whose key starts with ``fastq_screen_database``; the key
    suffix (after ``fastq_screen_database_``) becomes the DATABASE name.
    The command line is built by PipelineMapping.FastqScreen and executed
    via P.run().

    NOTE(review): P.run() in CGAT-style pipelines appears to collect
    ``statement``, ``job_threads`` and ``job_memory`` from the caller's
    local scope — do not rename or remove those locals; confirm against
    the pipeline framework in use.
    """
    tempdir = P.getTempDir(".")
    # NOTE(review): outdir is computed but not used within this function —
    # presumably consumed elsewhere or vestigial; verify.
    outdir = os.path.join(PARAMS["exportdir"], "fastq_screen")
    # fastq_screen_options must contain exactly one "--threads N" flag; it
    # doubles as the cluster thread request for this job.
    job_threads = re.findall(r'--threads \d+', PARAMS['fastq_screen_options'])
    if len(job_threads) != 1:
        raise ValueError("Wrong number of threads for fastq_screen")
    job_threads = int(re.sub(r'--threads ', '', job_threads[0]))
    job_memory = "8G"
    # Write one DATABASE line per configured screen database.
    with IOTools.openFile(os.path.join(tempdir, "fastq_screen.conf"),
                          "w") as f:
        for i, k in list(PARAMS.items()):
            if i.startswith("fastq_screen_database"):
                f.write("DATABASE\t%s\t%s\n" % (i[22:], k))
    m = PipelineMapping.FastqScreen()
    statement = m.build((infiles,), outfile)
    P.run()
    shutil.rmtree(tempdir)
    # Touch the sentinel file so the pipeline records task completion.
    P.touch(outfile)
|
Early warning monitoring and management of disasters
Everyone would admit that disaster early warning is more important than later treatment and damage repair. If an effective tsunami early warning system had been in place in the Indian Ocean region on 26 December 2004, thousands of lives would have been saved. The same stark lesson can be drawn from other disasters that have killed tens of thousands of people in the past few years. Effective early warning systems not only save lives but also help protect livelihoods and assets created by national development. This paper addresses the issue of disaster early warning monitoring and management in a systemic manner and offers a general approach to a management solution. From the viewpoint of control theory, it depicts the disaster early warning monitoring and management as an information chain which has five links: disaster model bank link, disaster monitoring network link, disaster transmission channel, disaster analysis and management link and decision making and commanding link. The five links constitute an information loop, with disaster data being collected, processed through the chain and control information being fed back to the different links. With some vivid examples, this paper indicates the weakness of current links in the existing disaster early warning and management systems. On the basis of all the above analyses, the paper finally puts forward some suggestions in order to improve the performance of early warning monitoring and management of disasters.
|
Matrix pencil method for estimating radar cross‐section of moving targets with near‐field measurements
In this article, an algorithm is introduced for estimating the radar cross‐section of moving targets with near‐field measurements. This algorithm uses the matrix pencil method that facilitates an efficient compensation of range‐Doppler coupling. Furthermore, the scattering centers are extracted from Chirp ISAR data to calculate the radar cross‐section. © 2016 Wiley Periodicals, Inc. Microwave Opt Technol Lett 58:471–477, 2016
|
import { Injectable } from '@angular/core';
import { CurrentUser } from '../shared/models/user';
import { Observable, of, ReplaySubject } from 'rxjs';
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { map } from 'rxjs/operators';
import { Router } from '@angular/router';
import { MessengerService } from '../shared/service/messenger.service';
@Injectable({
  providedIn: 'root'
})
export class AccountService {
  private currentUserSource = new ReplaySubject<CurrentUser>(1);

  /** Emits the most recently authenticated user (or null after logout). */
  currentUser$ = this.currentUserSource.asObservable();

  constructor(
    private http: HttpClient,
    private messengerService: MessengerService,
    private router: Router
  ) {}

  /**
   * Stores the auth token and publishes the user on the shared stream.
   * Extracted so register/login/loadCurrentUser share a single code path
   * (the logic was previously triplicated).
   */
  private persistUser(user: CurrentUser): void {
    if (user) {
      localStorage.setItem('token', user.token);
      this.currentUserSource.next(user);
    }
  }

  /** Registers a new account; on success the token is persisted. */
  register(values: any): Observable<void> {
    return this.http.post('/api/v1/register', values).pipe(
      map((user: CurrentUser) => this.persistUser(user))
    );
  }

  /** Logs in; on success the token is persisted. */
  login(values: any): Observable<any> {
    return this.http.post('/api/v1/login', values).pipe(
      map((user: CurrentUser) => this.persistUser(user))
    );
  }

  /** Clears local auth/cart state and navigates to the login page. */
  logout(): void {
    localStorage.removeItem('token');
    localStorage.removeItem('cartItems');
    this.currentUserSource.next(null);
    this.messengerService.updateCart();
    this.router.navigateByUrl('/account/login');
  }

  /**
   * Re-loads the current user for an existing token; publishes null and
   * short-circuits when no token is present.
   */
  loadCurrentUser(token: string): any {
    if (token === null) {
      this.currentUserSource.next(null);
      return of(null);
    }
    let headers = new HttpHeaders();
    headers = headers.set('Authorization', `Bearer ${token}`);
    return this.http.get('/api/v1/account', { headers }).pipe(
      map((user: CurrentUser) => this.persistUser(user))
    );
  }
}
|
/* Restore previous windows status and raster timings */
static void
tegra_dc_sor_restore_win_and_raster(struct display_controller *disp_ctrl,
				    int *dc_reg_ctx)
{
	int selected_windows, i;

	/* Remember the window-selection register so it can be restored last. */
	selected_windows = READL(&disp_ctrl->cmd.disp_win_header);

	/* Restore each window's options one window at a time: select the
	 * window, write its saved options, then request activation. */
	for (i = 0; i < DC_N_WINDOWS; ++i) {
		WRITEL(WINDOW_A_SELECT << i, &disp_ctrl->cmd.disp_win_header);
		WRITEL(dc_reg_ctx[i], &disp_ctrl->win.win_opt);
		WRITEL(WIN_A_ACT_REQ << i, &disp_ctrl->cmd.state_ctrl);
	}
	WRITEL(selected_windows, &disp_ctrl->cmd.disp_win_header);

	/* i == DC_N_WINDOWS here: the raster timings were saved in dc_reg_ctx
	 * immediately after the per-window entries, in this exact order. */
	WRITEL(dc_reg_ctx[i++], &disp_ctrl->disp.ref_to_sync);
	WRITEL(dc_reg_ctx[i++], &disp_ctrl->disp.sync_width);
	WRITEL(dc_reg_ctx[i++], &disp_ctrl->disp.back_porch);
	WRITEL(dc_reg_ctx[i++], &disp_ctrl->disp.front_porch);
	WRITEL(dc_reg_ctx[i++], &disp_ctrl->disp.disp_active);
	WRITEL(GENERAL_UPDATE, &disp_ctrl->cmd.state_ctrl);
}
|
/**
 * Unit tests for {@code Item} discount accumulation and "red pencil"
 * promotion rules.
 *
 * @author Chris Baker <[email protected]>
 */
public class ItemTest {
    static final Logger LOG = LoggerFactory.getLogger(ItemTest.class);

    @BeforeClass
    public static void setUpBeforeClass() throws Exception {}

    @Before
    public void setUp() throws Exception {}

    // Overlapping 10% and 15% discounts: at day 3 both apply and the
    // cumulative discount is expected to be 24.
    // NOTE(review): 24 presumably reflects compounding with rounding —
    // confirm against Item.determineCumulativeDiscount (see XXX below).
    @Test
    public void determineCumulativeDiscount() {
        Money cost = Money.parse("USD 1.00");
        Money list = Money.parse("USD 2.00");
        Item item =
            new Item("test1", "determineCumulativeDiscount", cost, new RetailPrice(list,
                LocalDate.now()));
        Discount tenPercent2Days = new Discount(DiscountPercent.TEN, LocalDate.now().plusDays(2));
        item.addDiscount(tenPercent2Days);
        Discount fifteenPercent3Days =
            new Discount(DiscountPercent.FIFTEEN, LocalDate.now().plusDays(3), LocalDate.now().plusDays(6));
        item.addDiscount(fifteenPercent3Days);
        assertEquals(cost, item.getCost());
        assertEquals(list, item.determineCurrentRetailPrice().getMoney());
        LOG.debug("Now Current List Price: " + item.determineCurrentRetailPrice().getMoney());
        LOG.debug("Now Discount Price: " + item.applyDiscounts(LocalDate.now()));
        LOG.debug("2 Day Current List Price: " + item.determineCurrentRetailPrice(LocalDate.now().plusDays(2)).getMoney());
        LOG.debug("2 Day Discount Price: " + item.applyDiscounts(LocalDate.now().plusDays(2)));
        LOG.debug("3 Day Current List Price: " + item.determineCurrentRetailPrice(LocalDate.now().plusDays(3)).getMoney());
        LOG.debug("3 Day Discount Price: " + item.applyDiscounts(LocalDate.now().plusDays(3)));
        LOG.debug("3 Day Cumulative Discount: " + item.determineCumulativeDiscount(LocalDate.now().plusDays(3)));
        // XXX This could be less brittle
        assertEquals(24, item.determineCumulativeDiscount(LocalDate.now().plusDays(3)));
        LOG.debug("10 Day Current List Price: " + item.determineCurrentRetailPrice(LocalDate.now().plusDays(10)).getMoney());
        LOG.debug("10 Day Discount Price: " + item.applyDiscounts(LocalDate.now().plusDays(10)));
        assertEquals(item.determineCurrentRetailPrice(), item.determineCurrentRetailPrice(LocalDate.now().plusDays(10)));
        assertTrue("Not yet implemented", true);
    }

    // With no discounts at all, the longest-discount query signals "none"
    // with -1 rather than zero.
    @Test
    public void determineLongestDiscountDaysCount_noDiscount_returnsNegative1() {
        Money cost = Money.parse("USD 1.00");
        Money list = Money.parse("USD 2.00");
        Item item =
            new Item("test1", "determineCumulativeDiscount", cost, new RetailPrice(list,
                LocalDate.now()));
        List<Discount> discounts = item.determineDiscountsThatApply(LocalDate.now());
        assertEquals(0, discounts.size());
        assertEquals(-1, item.determineLongestDiscountDaysCount(LocalDate.now()));
    }

    // A discount starting today is found; the day before, no discount applies.
    @Test
    public void getEarliestDiscountThatApplies() {
        Money cost = Money.parse("USD 2.00");
        Money list = Money.parse("USD 4.00");
        Item item =
            new Item("test2", "isRedPencil_20percent32Days", cost, new RetailPrice(list,
                LocalDate.now().minusDays(200)));
        assertTrue(item.isRetailPriceStable());
        item.addDiscount(new Discount(DiscountPercent.TWENTY, LocalDate.now()));
        Optional<Discount> discount = item.getEarliestDiscountThatApplies();
        assertTrue(discount.isPresent());
        assertEquals(LocalDate.now(), discount.get().getStartDate());
        // Now rewind one day
        discount = item.getEarliestDiscountThatApplies(LocalDate.now().minusDays(1));
        assertFalse(discount.isPresent());
    }

    // Stability boundary: a price 31 days old is stable, 30 days is not.
    @Test
    public void isRetailPriceStable() {
        Money cost = Money.parse("USD 1.00");
        Money list = Money.parse("USD 2.00");
        Item item =
            new Item("test1", "determineCumulativeDiscount", cost, new RetailPrice(list,
                LocalDate.now().minusDays(31)));
        assertTrue(item.isRetailPriceStable());
        item =
            new Item("test1", "determineCumulativeDiscount", cost, new RetailPrice(list,
                LocalDate.now().minusDays(30)));
        assertFalse(item.isRetailPriceStable());
    }

    // A 20% discount on a stable price is a red pencil within the promotion
    // window; stacking a further 15% after 10 days ends the promotion.
    @Test
    public void isRedPencil_20percent32Days() {
        Money cost = Money.parse("USD 2.00");
        Money list = Money.parse("USD 4.00");
        Item item =
            new Item("test2", "isRedPencil_20percent32Days", cost, new RetailPrice(list,
                LocalDate.now().minusDays(31)));
        item.addDiscount(new Discount(DiscountPercent.TWENTY, LocalDate.now()));
        assertTrue(item.isRetailPriceStable());
        assertTrue(item.isRedPencil());
        assertTrue(item.isRedPencil(LocalDate.now().plusDays(20)));
        assertFalse(item.isRedPencil(LocalDate.now().plusDays(40)));
        // Now add a discount that after 10 days puts it over the 30% threshold.
        item.addDiscount(new Discount(DiscountPercent.FIFTEEN, LocalDate.now().plusDays(10)));
        assertTrue(item.isRedPencil());
        assertFalse(item.isRedPencil(LocalDate.now().plusDays(20)));
        assertFalse(item.isRedPencil(LocalDate.now().plusDays(40)));
    }

    // An otherwise-valid red pencil is disqualified by an expired discount
    // that falls inside the red-pencil look-back window.
    @Test
    public void isRedPencil_oldDiscountWithinWindow() {
        Money cost = Money.parse("USD 2.00");
        Money list = Money.parse("USD 4.00");
        Item item =
            new Item("test2", "isRedPencil_20percent32Days", cost, new RetailPrice(list, LocalDate
                .now().minusDays(200)));
        assertTrue(item.isRetailPriceStable());
        item.addDiscount(new Discount(DiscountPercent.TWENTY, LocalDate.now()));
        Optional<Discount> discount = item.getEarliestDiscountThatApplies();
        assertTrue(discount.isPresent());
        assertEquals(1, item.getDiscounts().size());
        assertEquals(1, item.determineDiscountsThatApply().size());
        assertTrue(item.isRetailPriceStable());
        assertTrue(item.isRedPencil());
        // So far everything is fine
        // Now let's add an expired discount within the window.
        item.addDiscount(new Discount(DiscountPercent.FIVE, LocalDate.now().minusDays(20), LocalDate
            .now().minusDays(15)));
        LOG.debug(item.toString());
        discount = item.getEarliestDiscountThatApplies();
        assertTrue(discount.isPresent());
        assertEquals(2, item.getDiscounts().size());
        assertEquals(1, item.determineDiscountsThatApply().size());
        assertFalse(item.isRedPencil());
        assertTrue(item.hasAPreviousDiscountWithinRedPencilWindow());
        assertTrue(item.isRetailPriceStable());
    }
}
|
/*
 * NOTE(review): this region of the file arrived scrambled (statements split
 * and interleaved across lines).  The program below is a reconstruction of
 * the evident intent, inferred mainly from the intact else-branch of func():
 * read an integer n and two newline-terminated strings a and b from stdin,
 * insert one string into the other immediately after index n, and print
 * the combined string.  Two defects in the recoverable fragments are fixed:
 *   - `x[j1--] = x[j1 - 1]` is undefined behavior (unsequenced read and
 *     modification of j1); split into two statements.
 *   - realloc(x, i + j) was one byte too small for the '\0' terminator;
 *     now i + j + 1.
 */
void func(char *a, char *b, int i, int j, int n);

int main(void)
{
    char *a, *b;
    int i = 0, j = 0, n, ch;

    if (scanf("%d", &n) != 1)
        return 1;
    /* Discard the rest of the line holding the number (portable
     * replacement for the original rewind(stdin) hack). */
    while ((ch = getchar()) != '\n' && ch != EOF) { }

    /* Read string a one character at a time, growing the buffer as we go.
     * NOTE(review): realloc failure and premature EOF are not handled,
     * matching the recoverable original. */
    a = (char *)malloc(1);
    while ((a[i++] = (char)getchar()) != '\n')
        a = (char *)realloc(a, (size_t)i + 1);
    a[i - 1] = '\0';

    /* Read string b the same way. */
    b = (char *)malloc(1);
    while ((b[j++] = (char)getchar()) != '\n')
        b = (char *)realloc(b, (size_t)j + 1);
    b[j - 1] = '\0';

    /* i - 1 and j - 1 are the string lengths without the terminator. */
    func(a, b, i - 1, j - 1, n);
    /* No free(): func() may have realloc'd the buffers, so the pointers
     * here can be stale; memory is reclaimed at process exit as in the
     * original. */
    return 0;
}

/*
 * Insert the longer of a/b into the shorter one directly after index n and
 * print the result.  i and j are the lengths of a and b respectively.
 * Assumes 0 <= n < length of the receiving string — TODO confirm intended
 * behavior for out-of-range n.
 */
void func(char *a, char *b, int i, int j, int n)
{
    int i1, j1, j2;

    if (i > j) {
        /* a is longer: open a gap of i characters in b after index n. */
        b = (char *)realloc(b, (size_t)i + (size_t)j + 1);
        for (i1 = 0; i1 < i; i1++) {
            j1 = j + 1 + i1;
            while (j1 > n) {
                b[j1] = b[j1 - 1];
                j1--;
            }
        }
        /* Copy a into the gap and print. */
        i1 = 0;
        j2 = n + 1;
        while (a[i1])
            b[j2++] = a[i1++];
        puts(b);
    } else {
        /* b is at least as long: open a gap of j characters in a. */
        a = (char *)realloc(a, (size_t)i + (size_t)j + 1);
        for (i1 = 0; i1 < j; i1++) {
            j1 = i + 1 + i1;
            while (j1 > n) {
                a[j1] = a[j1 - 1];
                j1--;
            }
        }
        i1 = 0;
        j2 = n + 1;
        while (b[i1])
            a[j2++] = b[i1++];
        puts(a);
    }
}
|
/*
* Copyright 2017-2021 Micro Focus or one of its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.cafdataprocessing.workflow.testing.models;
import com.hpe.caf.worker.document.model.Application;
import com.hpe.caf.worker.document.model.Document;
import com.hpe.caf.worker.document.model.Failures;
import com.hpe.caf.worker.document.model.Field;
import com.hpe.caf.worker.document.model.Fields;
import com.hpe.caf.worker.document.model.Subdocuments;
import com.hpe.caf.worker.document.model.Task;
import java.util.Map;
/**
 * Test double for the Document Worker {@link Document} interface.
 * <p>
 * Backs each interface method with plain field storage so workflow tests can
 * build documents without a running worker framework.  A {@code null}
 * subdocuments argument is replaced with an empty {@link SubdocumentsMock},
 * so the subdocuments collection is never {@code null}.
 */
public class DocumentMock implements Document
{
    private String reference;
    private Fields fields;
    private final Task task;
    private final Map<String, String> customData;
    private final Failures failures;
    private final Subdocuments subdocuments;
    private final Application application;
    private final Document parentDocument;
    private Document rootDocument;

    public DocumentMock(final String reference, final Fields fields, final Task task, final Map<String, String> customData,
                        final Failures failures, final Subdocuments subdocuments, final Application application,
                        final Document parentDocument, final Document rootDocument)
    {
        this.reference = reference;
        this.fields = fields;
        this.task = task;
        this.customData = customData;
        this.failures = failures;
        // Guarantee a non-null collection; hasSubdocuments() relies on this.
        this.subdocuments = subdocuments == null ? new SubdocumentsMock() : subdocuments;
        this.application = application;
        this.parentDocument = parentDocument;
        this.rootDocument = rootDocument;
    }

    @Override
    public Task getTask()
    {
        return task;
    }

    @Override
    public String getReference()
    {
        return reference;
    }

    @Override
    public void setReference(final String reference)
    {
        this.reference = reference;
    }

    @Override
    public void resetReference()
    {
        this.reference = null;
    }

    @Override
    public Fields getFields()
    {
        return fields;
    }

    /**
     * Returns the named field, creating and registering an empty mock field
     * when it does not exist yet, so callers never receive {@code null}.
     * Assumes the backing fields object is a {@link FieldsMock} — TODO confirm
     * all tests construct it that way.
     */
    @Override
    public Field getField(final String fieldName)
    {
        return fields.stream().filter(f -> f.getName().equals(fieldName)).findFirst().orElseGet(() -> {
            final Field mockedField = new FieldMock(this, fieldName, application);
            ((FieldsMock) this.fields).addField(mockedField);
            return mockedField;
        });
    }

    @Override
    public String getCustomData(final String dataKey)
    {
        return customData.get(dataKey);
    }

    @Override
    public Failures getFailures()
    {
        return failures;
    }

    @Override
    public void addFailure(final String failureId, final String failureMessage)
    {
        this.failures.add(failureId, failureMessage);
    }

    @Override
    public Document getParentDocument()
    {
        return parentDocument;
    }

    @Override
    public Document getRootDocument()
    {
        return rootDocument;
    }

    @Override
    public Subdocuments getSubdocuments()
    {
        return subdocuments;
    }

    @Override
    public boolean hasSubdocuments()
    {
        // The constructor guarantees subdocuments is never null, so the
        // former null check was dead code.
        return !subdocuments.isEmpty();
    }

    @Override
    public boolean hasChanges()
    {
        // The mock always reports pending changes.
        return true;
    }

    @Override
    public void reset()
    {
        // Intentionally a no-op in the mock.
    }

    @Override
    public Application getApplication()
    {
        return application;
    }

    public void setFields(final Fields fields)
    {
        this.fields = fields;
    }

    public void setRootDocument(Document rootDocument)
    {
        this.rootDocument = rootDocument;
    }
}
|
// packages/watchmen-web-client/src/console/connected-space/report-view/editor/filter-tab/filters/expression-filter/index.tsx
import {ParameterKind} from '@/services/data/tuples/factor-calculator-types';
import {Report, ReportFilterExpression, ReportFilterJoint} from '@/services/data/tuples/report-types';
import {Subject} from '@/services/data/tuples/subject-types';
import {ICON_DELETE} from '@/widgets/basic/constants';
import {Lang} from '@/widgets/langs';
import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
import React, {MouseEvent} from 'react';
import {FilterEventBusProvider} from '../filter-event-bus';
import {HierarchicalFilterEventBridge} from '../hierarchical-filter-event-bridge';
import {RemoveFilterIcon} from '../widgets';
import {Expression} from './expression';
import {ExpressionOperator} from './expression-operator';
import {ExpressionRight} from './expression-right';
import {ExpressionFilterContainer, ExpressionLeadLabel} from './widgets';
/**
 * Editor row for a single expression filter: a left parameter, a comparison
 * operator and a right parameter, plus a remove button that detaches the
 * expression from its parent joint.
 */
export const ExpressionFilterEdit = (props: {
	subject: Subject;
	report: Report;
	parentJoint: ReportFilterJoint;
	onRemoveMe: () => void;
	notifyChangeToParent: () => void;
	filter: ReportFilterExpression;
}) => {
	const {subject, report, filter, parentJoint, onRemoveMe, notifyChangeToParent} = props;

	// Remove this expression from the parent joint and let the parent know.
	const onRemoveClicked = (event: MouseEvent<HTMLDivElement>) => {
		event.preventDefault();
		event.stopPropagation();
		const filterIndex = parentJoint.filters.indexOf(filter);
		if (filterIndex === -1) {
			return;
		}
		parentJoint.filters.splice(filterIndex, 1);
		onRemoveMe && onRemoveMe();
	};

	return <FilterEventBusProvider>
		<ExpressionFilterContainer>
			<ExpressionLeadLabel>{Lang.PARAMETER.EXPRESSION}</ExpressionLeadLabel>
			<RemoveFilterIcon onClick={onRemoveClicked}>
				<FontAwesomeIcon icon={ICON_DELETE}/>
			</RemoveFilterIcon>
			<Expression filter={filter} parameter={filter.left} availableKinds={[ParameterKind.TOPIC]}
			            subject={subject} report={report}
			            visible={true}/>
			<ExpressionOperator filter={filter}/>
			<ExpressionRight filter={filter} parameter={filter.right} subject={subject} report={report}/>
		</ExpressionFilterContainer>
		<HierarchicalFilterEventBridge notifyChangeToParent={notifyChangeToParent}/>
	</FilterEventBusProvider>;
};
|
/**
 * React to slays which hurt a monster
 *
 * \param obj_flags is the set of flags we're testing for slays
 * \param mon_flags is the set of flags we're adjusting as a result
 */
void react_to_slay(bitflag *obj_flags, bitflag *mon_flags)
{
	const struct slay *s_ptr = slay_table;
	const struct slay *table_end = slay_table + SL_MAX;

	/* Mark the monster flag of every slay present on the object. */
	for (; s_ptr < table_end; s_ptr++) {
		if (of_has(obj_flags, s_ptr->object_flag) && s_ptr->monster_flag)
			rf_on(mon_flags, s_ptr->monster_flag);
	}
}
|
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
///
/// \file This file wraps cl2.hpp and introduces wrapper classes for Intel sharing extensions.
///
#pragma once

// NOMINMAX must be defined before any header that may transitively include
// <windows.h> (e.g. the D3D11 sharing headers below), otherwise the min/max
// macros leak and break std::min/std::max.  It was previously defined after
// the first include.
#define NOMINMAX

#include <CL/opencl.hpp>

#ifdef _WIN32
#include <CL/cl_d3d11.h>
typedef cl_d3d11_device_source_khr cl_device_source_intel;
typedef cl_d3d11_device_set_khr cl_device_set_intel;
#else
#include <CL/cl_va_api_media_sharing_intel.h>
typedef cl_va_api_device_source_intel cl_device_source_intel;
typedef cl_va_api_device_set_intel cl_device_set_intel;
#endif
// cl_intel_required_subgroup_size
#define CL_DEVICE_SUB_GROUP_SIZES_INTEL 0x4108
// cl_intel_device_attribute_query
#define CL_DEVICE_IP_VERSION_INTEL 0x4250
#define CL_DEVICE_ID_INTEL 0x4251
#define CL_DEVICE_NUM_SLICES_INTEL 0x4252
#define CL_DEVICE_NUM_SUB_SLICES_PER_SLICE_INTEL 0x4253
#define CL_DEVICE_NUM_EUS_PER_SUB_SLICE_INTEL 0x4254
#define CL_DEVICE_NUM_THREADS_PER_EU_INTEL 0x4255
#define CL_DEVICE_FEATURE_CAPABILITIES_INTEL 0x4256
typedef cl_bitfield cl_device_feature_capabilities_intel;
/* For GPU devices, version 1.0.0: */
#define CL_DEVICE_FEATURE_FLAG_DP4A_INTEL (1 << 0)
#define CL_DEVICE_FEATURE_FLAG_DPAS_INTEL (1 << 1)
#define CL_HPP_PARAM_NAME_CL_INTEL_COMMAND_QUEUE_FAMILIES_(F) \
F(cl_device_info, CL_DEVICE_QUEUE_FAMILY_PROPERTIES_INTEL, cl::vector<cl_queue_family_properties_intel>) \
\
F(cl_command_queue_info, CL_QUEUE_FAMILY_INTEL, cl_uint) \
F(cl_command_queue_info, CL_QUEUE_INDEX_INTEL, cl_uint)
namespace cl {
namespace detail {
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SUB_GROUP_SIZES_INTEL, vector<size_type>)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_IP_VERSION_INTEL, cl_uint)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_ID_INTEL, cl_uint)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_NUM_SLICES_INTEL, cl_uint)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_NUM_SUB_SLICES_PER_SLICE_INTEL, cl_uint)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_NUM_EUS_PER_SUB_SLICE_INTEL, cl_uint)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_NUM_THREADS_PER_EU_INTEL, cl_uint)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_FEATURE_CAPABILITIES_INTEL, cl_device_feature_capabilities_intel)
CL_HPP_PARAM_NAME_CL_INTEL_COMMAND_QUEUE_FAMILIES_(CL_HPP_DECLARE_PARAM_TRAITS_)
} // namespace detail
} // namespace cl
#include <memory>
namespace {
template <typename T>
T load_entrypoint(const cl_platform_id platform, const std::string name) {
#if defined(__GNUC__) && __GNUC__ < 5
// OCL spec says:
// "The function clGetExtensionFunctionAddressForPlatform returns the address of the extension function named by funcname for a given platform.
// The pointer returned should be cast to a function pointer type matching the extension function's definition defined in the appropriate extension
// specification and header file."
// So the pointer-to-object to pointer-to-function cast below is supposed to be valid, thus we suppress warning from old GCC versions.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
T p = reinterpret_cast<T>(clGetExtensionFunctionAddressForPlatform(platform, name.c_str()));
#if defined(__GNUC__) && __GNUC__ < 5
#pragma GCC diagnostic pop
#endif
if (!p) {
throw std::runtime_error("clGetExtensionFunctionAddressForPlatform(" + name + ") returned NULL.");
}
return p;
}
template <typename T>
T load_entrypoint(const cl_device_id device, const std::string name) {
cl_platform_id platform;
cl_int error = clGetDeviceInfo(device, CL_DEVICE_PLATFORM, sizeof(platform), &platform, nullptr);
if (error) {
throw std::runtime_error("Failed to retrieve CL_DEVICE_PLATFORM: " + std::to_string(error));
}
return load_entrypoint<T>(platform, name);
}
// loader functions created for single device contexts
// ToDo Extend it for multi device case.
template <typename T>
T load_entrypoint(const cl_context context, const std::string name) {
size_t size = 0;
cl_int error = clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, nullptr, &size);
if (error) {
throw std::runtime_error("Failed to retrieve CL_CONTEXT_DEVICES size: " + std::to_string(error));
}
std::vector<cl_device_id> devices(size / sizeof(cl_device_id));
error = clGetContextInfo(context, CL_CONTEXT_DEVICES, size, devices.data(), nullptr);
if (error) {
throw std::runtime_error("Failed to retrieve CL_CONTEXT_DEVICES: " + std::to_string(error));
}
return load_entrypoint<T>(devices.front(), name);
}
template <typename T>
T try_load_entrypoint(const cl_context context, const std::string name) {
try {
return load_entrypoint<T>(context, name);
} catch (...) {
return nullptr;
}
}
template <typename T>
T try_load_entrypoint(const cl_platform_id platform, const std::string name) {
try {
return load_entrypoint<T>(platform, name);
} catch (...) {
return nullptr;
}
}
template <typename T>
T load_entrypoint(const cl_kernel kernel, const std::string name) {
cl_context context;
cl_int error = clGetKernelInfo(kernel, CL_KERNEL_CONTEXT, sizeof(context),
&context, nullptr);
if (error) {
throw std::runtime_error("Failed to retrieve CL_KERNEL_CONTEXT: " +
std::to_string(error));
}
return load_entrypoint<T>(context, name);
}
template <typename T>
T load_entrypoint(const cl_command_queue queue, const std::string name) {
cl_context context;
cl_int error = clGetCommandQueueInfo(queue, CL_QUEUE_CONTEXT, sizeof(context),
&context, nullptr);
if (error) {
throw std::runtime_error("Failed to retrieve CL_QUEUE_CONTEXT: " +
std::to_string(error));
}
return load_entrypoint<T>(context, name);
}
} // namespace
namespace cl {
typedef CL_API_ENTRY cl_int(CL_API_CALL *PFN_clEnqueueAcquireMediaSurfacesINTEL)(
cl_command_queue /* command_queue */,
cl_uint /* num_objects */,
const cl_mem* /* mem_objects */,
cl_uint /* num_events_in_wait_list */,
const cl_event* /* event_wait_list */,
cl_event* /* event */);
typedef CL_API_ENTRY cl_int(CL_API_CALL *PFN_clEnqueueReleaseMediaSurfacesINTEL)(
cl_command_queue /* command_queue */,
cl_uint /* num_objects */,
const cl_mem* /* mem_objects */,
cl_uint /* num_events_in_wait_list */,
const cl_event* /* event_wait_list */,
cl_event* /* event */);
typedef CL_API_ENTRY cl_mem(CL_API_CALL * PFN_clCreateFromMediaSurfaceINTEL)(
cl_context /* context */,
cl_mem_flags /* flags */,
void* /* surface */,
cl_uint /* plane */,
cl_int* /* errcode_ret */);
#ifdef WIN32
typedef CL_API_ENTRY cl_mem(CL_API_CALL * PFN_clCreateFromD3D11Buffer)(
cl_context context,
cl_mem_flags flags,
void* resource, cl_int* errcode_ret);
#endif
// RAII guard that enqueues an acquire of shared media surfaces (D3D11 on
// Windows, VA-API elsewhere) on construction and the matching release on
// destruction.  Errors are reported through the optional cl_int* passed to
// the constructor; the destructor swallows release errors silently when no
// error pointer was given.
class SharedSurfLock {
    cl_command_queue m_queue;
    std::vector<cl_mem> m_surfaces;
    cl_int* m_errPtr;

public:
    static PFN_clEnqueueAcquireMediaSurfacesINTEL pfn_acquire;
    static PFN_clEnqueueReleaseMediaSurfacesINTEL pfn_release;

    // Lazily resolves the acquire/release extension entry points; either
    // pointer stays nullptr when the extension is unavailable, in which case
    // the guard becomes a no-op.
    static void Init(cl_platform_id platform) {
#ifdef WIN32
        const char* fnameAcq = "clEnqueueAcquireD3D11ObjectsKHR";
        const char* fnameRel = "clEnqueueReleaseD3D11ObjectsKHR";
#else
        const char* fnameAcq = "clEnqueueAcquireVA_APIMediaSurfacesINTEL";
        const char* fnameRel = "clEnqueueReleaseVA_APIMediaSurfacesINTEL";
#endif
        if (!pfn_acquire) {
            pfn_acquire = try_load_entrypoint<PFN_clEnqueueAcquireMediaSurfacesINTEL>(platform, fnameAcq);
        }
        if (!pfn_release) {
            pfn_release = try_load_entrypoint<PFN_clEnqueueReleaseMediaSurfacesINTEL>(platform, fnameRel);
        }
    }

    SharedSurfLock(cl_command_queue queue,
                   std::vector<cl_mem>& surfaces,
                   cl_int * err = NULL)
        : m_queue(queue), m_surfaces(surfaces), m_errPtr(err) {
        if (pfn_acquire != NULL && m_surfaces.size()) {
            cl_int error = pfn_acquire(m_queue,
                                       static_cast<cl_uint>(m_surfaces.size()),
                                       m_surfaces.data(),
                                       0, NULL, NULL);

            if (error != CL_SUCCESS && m_errPtr != NULL) {
                *m_errPtr = error;
            }
        }
    }

    // Copying the guard would enqueue the release twice; forbid it.
    SharedSurfLock(const SharedSurfLock&) = delete;
    SharedSurfLock& operator=(const SharedSurfLock&) = delete;

    ~SharedSurfLock() {
        if (pfn_release != NULL && m_surfaces.size()) {
            cl_int error = pfn_release(m_queue,
                                       static_cast<cl_uint>(m_surfaces.size()),
                                       m_surfaces.data(),
                                       0, NULL, NULL);
            if (error != CL_SUCCESS && m_errPtr != NULL) {
                *m_errPtr = error;
            }
        }
    }
};
class ImageVA : public Image2D {
public:
static PFN_clCreateFromMediaSurfaceINTEL pfn_clCreateFromMediaSurfaceINTEL;
static void Init(cl_platform_id platform) {
#ifdef WIN32
const char* fname = "clCreateFromD3D11Texture2DKHR";
#else
const char* fname = "clCreateFromVA_APIMediaSurfaceINTEL";
#endif
if (!pfn_clCreateFromMediaSurfaceINTEL) {
pfn_clCreateFromMediaSurfaceINTEL = try_load_entrypoint<PFN_clCreateFromMediaSurfaceINTEL>((cl_platform_id)platform, fname);
}
}
/*! \brief Constructs a ImageVA, in a specified context, from a
* given vaSurfaceID.
*
* Wraps clCreateFromMediaSurfaceINTEL().
*/
ImageVA(
const Context& context,
cl_mem_flags flags,
#ifdef WIN32
void* surface,
#else
uint32_t surface,
#endif
uint32_t plane,
cl_int * err = NULL) {
cl_int error;
object_ = pfn_clCreateFromMediaSurfaceINTEL(
context(),
flags,
#ifdef WIN32
surface,
#else
reinterpret_cast<void*>(&surface),
#endif
plane,
&error);
detail::errHandler(error);
if (err != NULL) {
*err = error;
}
}
//! \brief Default constructor - initializes to NULL.
ImageVA() : Image2D() { }
/*! \brief Constructor from cl_mem - takes ownership.
*
* \param retainObject will cause the constructor to retain its cl object.
* Defaults to false to maintain compatibility with
* earlier versions.
* See Memory for further details.
*/
explicit ImageVA(const cl_mem& image, bool retainObject = false) :
Image2D(image, retainObject) { }
/*! \brief Assignment from cl_mem - performs shallow copy.
*
* See Memory for further details.
*/
ImageVA& operator = (const cl_mem& rhs) {
Image2D::operator=(rhs);
return *this;
}
/*! \brief Copy constructor to forward copy to the superclass correctly.
* Required for MSVC.
*/
ImageVA(const ImageVA& img) :
Image2D(img) {}
/*! \brief Copy assignment to forward copy to the superclass correctly.
* Required for MSVC.
*/
ImageVA& operator = (const ImageVA &img) {
Image2D::operator=(img);
return *this;
}
/*! \brief Move constructor to forward move to the superclass correctly.
* Required for MSVC.
*/
ImageVA(ImageVA&& buf) noexcept : Image2D(std::move(buf)) {}
/*! \brief Move assignment to forward move to the superclass correctly.
* Required for MSVC.
*/
ImageVA& operator = (ImageVA &&buf) {
Image2D::operator=(std::move(buf));
return *this;
}
};
#ifdef WIN32
class BufferDX : public Buffer {
public:
static PFN_clCreateFromD3D11Buffer pfn_clCreateFromD3D11Buffer;
static void Init(cl_platform_id platform) {
const char* fname = "clCreateFromD3D11BufferKHR";
if (!pfn_clCreateFromD3D11Buffer) {
pfn_clCreateFromD3D11Buffer = try_load_entrypoint<PFN_clCreateFromD3D11Buffer>((cl_platform_id)platform, fname);
}
}
BufferDX(
const Context& context,
cl_mem_flags flags,
void* resource,
cl_int * err = NULL) {
cl_int error;
ID3D11Buffer* buffer = static_cast<ID3D11Buffer*>(resource);
object_ = pfn_clCreateFromD3D11Buffer(
context(),
flags,
buffer,
&error);
detail::errHandler(error);
if (err != NULL) {
*err = error;
}
}
//! \brief Default constructor - initializes to NULL.
BufferDX() : Buffer() { }
/*! \brief Constructor from cl_mem - takes ownership.
*
* \param retainObject will cause the constructor to retain its cl object.
* Defaults to false to maintain compatibility with
* earlier versions.
* See Memory for further details.
*/
explicit BufferDX(const cl_mem& buf, bool retainObject = false) :
Buffer(buf, retainObject) { }
/*! \brief Assignment from cl_mem - performs shallow copy.
*
* See Memory for further details.
*/
BufferDX& operator = (const cl_mem& rhs) {
Buffer::operator=(rhs);
return *this;
}
/*! \brief Copy constructor to forward copy to the superclass correctly.
* Required for MSVC.
*/
BufferDX(const BufferDX& buf) :
Buffer(buf) {}
/*! \brief Copy assignment to forward copy to the superclass correctly.
* Required for MSVC.
*/
BufferDX& operator = (const BufferDX &buf) {
Buffer::operator=(buf);
return *this;
}
/*! \brief Move constructor to forward move to the superclass correctly.
* Required for MSVC.
*/
BufferDX(BufferDX&& buf) noexcept : Buffer(std::move(buf)) {}
/*! \brief Move assignment to forward move to the superclass correctly.
* Required for MSVC.
*/
BufferDX& operator = (BufferDX &&buf) {
Buffer::operator=(std::move(buf));
return *this;
}
};
#endif
// Platform wrapper adding media-adapter device enumeration (D3D11 on
// Windows, VA-API elsewhere) and initialization of the sharing extensions.
class PlatformVA : public Platform {
public:
    //! \brief Default constructor - initializes to NULL.
    PlatformVA() : Platform() { }

    explicit PlatformVA(const cl_platform_id &platform, bool retainObject = false) :
        Platform(platform, retainObject) { }

    // Enumerates the devices of this platform that can share with the given
    // media adapter and fills *devices.  Returns CL_SUCCESS (with an empty
    // list) when no such device exists, or an OpenCL error code otherwise.
    // On success the acquire/release and surface/buffer sharing entry points
    // are resolved as a side effect.
    cl_int getDevices(
        cl_device_source_intel media_adapter_type,
        void *                 media_adapter,
        cl_device_set_intel    media_adapter_set,
        vector<Device>* devices) const {
        typedef CL_API_ENTRY cl_int(CL_API_CALL * PFN_clGetDeviceIDsFromMediaAdapterINTEL)(
            cl_platform_id /* platform */,
            cl_device_source_intel /* media_adapter_type */,
            void * /* media_adapter */,
            cl_device_set_intel /* media_adapter_set */,
            cl_uint /* num_entries */,
            cl_device_id * /* devices */,
            cl_uint * /* num_devices */);
#ifdef WIN32
        const char* fname = "clGetDeviceIDsFromD3D11KHR";
#else
        const char* fname = "clGetDeviceIDsFromVA_APIMediaAdapterINTEL";
#endif
        if (devices == NULL) {
            return detail::errHandler(CL_INVALID_ARG_VALUE, fname);
        }

        PFN_clGetDeviceIDsFromMediaAdapterINTEL pfn_clGetDeviceIDsFromMediaAdapterINTEL =
            try_load_entrypoint<PFN_clGetDeviceIDsFromMediaAdapterINTEL>(object_, fname);

        if (NULL == pfn_clGetDeviceIDsFromMediaAdapterINTEL) {
            return CL_INVALID_PLATFORM;
        }

        // First call queries the device count only.
        cl_uint n = 0;
        cl_int err = pfn_clGetDeviceIDsFromMediaAdapterINTEL(
            object_,
            media_adapter_type,
            media_adapter,
            media_adapter_set,
            0,
            NULL,
            &n);
        if (err != CL_SUCCESS && err != CL_DEVICE_NOT_FOUND) {
            return detail::errHandler(err, fname);
        }

        if (err != CL_DEVICE_NOT_FOUND) {
            vector<cl_device_id> ids(n);
            err = pfn_clGetDeviceIDsFromMediaAdapterINTEL(
                object_,
                media_adapter_type,
                media_adapter,
                media_adapter_set,
                n,
                ids.data(),
                NULL);
            if (err != CL_SUCCESS) {
                return detail::errHandler(err, fname);
            }

            // We must retain objects obtained from the API to avoid
            // releasing API-owned handles, so construct each Device with
            // retain behaviour.  (The former `if (devices)` re-check was
            // redundant: NULL was already rejected above.)
            devices->resize(ids.size());
            for (size_type i = 0; i < ids.size(); i++) {
                (*devices)[i] = Device(ids[i], true);
            }

            // set up acquire/release extensions
            SharedSurfLock::Init(object_);
            ImageVA::Init(object_);
#ifdef WIN32
            BufferDX::Init(object_);
#endif
        }

        return CL_SUCCESS;
    }
};
/*
 * Thin wrapper around the cl_intel_unified_shared_memory extension.
 * Entry points are resolved once at construction (when use_usm is true);
 * every call site checks availability and throws when the corresponding
 * extension function could not be loaded.
 */
class UsmHelper {
public:
    explicit UsmHelper(const cl::Context& ctx, const cl::Device device, bool use_usm) : _ctx(ctx), _device(device) {
        if (use_usm) {
            _host_mem_alloc_fn = try_load_entrypoint<clHostMemAllocINTEL_fn>(_ctx.get(), "clHostMemAllocINTEL");
            _shared_mem_alloc_fn = try_load_entrypoint<clSharedMemAllocINTEL_fn>(_ctx.get(), "clSharedMemAllocINTEL");
            _device_mem_alloc_fn = try_load_entrypoint<clDeviceMemAllocINTEL_fn>(_ctx.get(), "clDeviceMemAllocINTEL");
            _mem_free_fn = try_load_entrypoint<clMemFreeINTEL_fn>(_ctx.get(), "clMemFreeINTEL");
            _set_kernel_arg_mem_pointer_fn = try_load_entrypoint<clSetKernelArgMemPointerINTEL_fn>(_ctx.get(), "clSetKernelArgMemPointerINTEL");
            _enqueue_memcpy_fn = try_load_entrypoint<clEnqueueMemcpyINTEL_fn>(_ctx.get(), "clEnqueueMemcpyINTEL");
            _enqueue_mem_fill_fn = try_load_entrypoint<clEnqueueMemFillINTEL_fn>(_ctx.get(), "clEnqueueMemFillINTEL");
            _enqueue_memset_fn = try_load_entrypoint<clEnqueueMemsetINTEL_fn>(_ctx.get(), "clEnqueueMemsetINTEL");
            _get_mem_alloc_info_fn = try_load_entrypoint<clGetMemAllocInfoINTEL_fn>(_ctx.get(), "clGetMemAllocInfoINTEL");
        }
    }

    // Allocates host-visible USM memory.  (A stray line-continuation
    // backslash after the opening brace has been removed.)
    void* allocate_host(const cl_mem_properties_intel *properties, size_t size, cl_uint alignment, cl_int* err_code_ret) const {
        if (!_host_mem_alloc_fn)
            throw std::runtime_error("[CLDNN] clHostMemAllocINTEL is nullptr");
        return _host_mem_alloc_fn(_ctx.get(), properties, size, alignment, err_code_ret);
    }

    // Allocates USM memory shared between host and the wrapped device.
    void* allocate_shared(const cl_mem_properties_intel *properties, size_t size, cl_uint alignment, cl_int* err_code_ret) const {
        if (!_shared_mem_alloc_fn)
            throw std::runtime_error("[CLDNN] clSharedMemAllocINTEL is nullptr");
        return _shared_mem_alloc_fn(_ctx.get(), _device.get(), properties, size, alignment, err_code_ret);
    }

    // Allocates device-only USM memory.
    void* allocate_device(const cl_mem_properties_intel *properties, size_t size, cl_uint alignment, cl_int* err_code_ret) const {
        if (!_device_mem_alloc_fn)
            throw std::runtime_error("[CLDNN] clDeviceMemAllocINTEL is nullptr");
        return _device_mem_alloc_fn(_ctx.get(), _device.get(), properties, size, alignment, err_code_ret);
    }

    // Frees memory obtained from any of the allocate_* methods.
    void free_mem(void* ptr) const {
        if (!_mem_free_fn)
            throw std::runtime_error("[CLDNN] clMemFreeINTEL is nullptr");
        _mem_free_fn(_ctx.get(), ptr);
    }

    cl_int set_kernel_arg_mem_pointer(const cl::Kernel& kernel, uint32_t index, const void* ptr) const {
        if (!_set_kernel_arg_mem_pointer_fn)
            throw std::runtime_error("[CLDNN] clSetKernelArgMemPointerINTEL is nullptr");
        return _set_kernel_arg_mem_pointer_fn(kernel.get(), index, ptr);
    }

    cl_int enqueue_memcpy(const cl::CommandQueue& cpp_queue, void *dst_ptr, const void *src_ptr,
                          size_t bytes_count, bool blocking = true, const std::vector<cl::Event>* wait_list = nullptr, cl::Event* ret_event = nullptr) const {
        if (!_enqueue_memcpy_fn)
            throw std::runtime_error("[CLDNN] clEnqueueMemcpyINTEL is nullptr");

        cl_event tmp;
        cl_int err = _enqueue_memcpy_fn(
            cpp_queue.get(),
            static_cast<cl_bool>(blocking),
            dst_ptr,
            src_ptr,
            bytes_count,
            wait_list == nullptr ? 0 : static_cast<cl_uint>(wait_list->size()),
            wait_list == nullptr ? nullptr : reinterpret_cast<const cl_event*>(&wait_list->front()),
            ret_event == nullptr ? nullptr : &tmp);

        if (ret_event != nullptr && err == CL_SUCCESS)
            *ret_event = tmp;

        return err;
    }

    cl_int enqueue_fill_mem(const cl::CommandQueue& cpp_queue, void *dst_ptr, const void* pattern,
                            size_t pattern_size, size_t bytes_count, const std::vector<cl::Event>* wait_list = nullptr,
                            cl::Event* ret_event = nullptr) const {
        if (!_enqueue_mem_fill_fn)
            throw std::runtime_error("[CLDNN] clEnqueueMemFillINTEL is nullptr");

        cl_event tmp;
        cl_int err = _enqueue_mem_fill_fn(
            cpp_queue.get(),
            dst_ptr,
            pattern,
            pattern_size,
            bytes_count,
            wait_list == nullptr ? 0 : static_cast<cl_uint>(wait_list->size()),
            wait_list == nullptr ? nullptr : reinterpret_cast<const cl_event*>(&wait_list->front()),
            ret_event == nullptr ? nullptr : &tmp);

        if (ret_event != nullptr && err == CL_SUCCESS)
            *ret_event = tmp;

        return err;
    }

    cl_int enqueue_set_mem(const cl::CommandQueue& cpp_queue, void* dst_ptr, cl_int value,
                           size_t bytes_count, const std::vector<cl::Event>* wait_list = nullptr,
                           cl::Event* ret_event = nullptr) const {
        if (!_enqueue_memset_fn)
            throw std::runtime_error("[CLDNN] clEnqueueMemsetINTEL is nullptr");

        cl_event tmp;
        cl_int err = _enqueue_memset_fn(
            cpp_queue.get(),
            dst_ptr,
            value,
            bytes_count,
            wait_list == nullptr ? 0 : static_cast<cl_uint>(wait_list->size()),
            wait_list == nullptr ? nullptr : reinterpret_cast<const cl_event*>(&wait_list->front()),
            ret_event == nullptr ? nullptr : &tmp);

        if (ret_event != nullptr && err == CL_SUCCESS)
            *ret_event = tmp;

        return err;
    }

    cl_unified_shared_memory_type_intel get_usm_allocation_type(const void* usm_ptr) const {
        if (!_get_mem_alloc_info_fn) {
            throw std::runtime_error("[GPU] clGetMemAllocInfoINTEL is nullptr");
        }

        // Initialize the result so a failed query reports "unknown" instead
        // of returning an uninitialized value.
        cl_unified_shared_memory_type_intel ret_val = CL_MEM_TYPE_UNKNOWN_INTEL;
        size_t ret_val_size = 0;
        _get_mem_alloc_info_fn(_ctx.get(), usm_ptr, CL_MEM_ALLOC_TYPE_INTEL, sizeof(cl_unified_shared_memory_type_intel), &ret_val, &ret_val_size);
        return ret_val;
    }

private:
    cl::Context _ctx;
    cl::Device _device;
    clHostMemAllocINTEL_fn _host_mem_alloc_fn = nullptr;
    clMemFreeINTEL_fn _mem_free_fn = nullptr;
    clSharedMemAllocINTEL_fn _shared_mem_alloc_fn = nullptr;
    clDeviceMemAllocINTEL_fn _device_mem_alloc_fn = nullptr;
    clSetKernelArgMemPointerINTEL_fn _set_kernel_arg_mem_pointer_fn = nullptr;
    clEnqueueMemcpyINTEL_fn _enqueue_memcpy_fn = nullptr;
    clEnqueueMemFillINTEL_fn _enqueue_mem_fill_fn = nullptr;
    clEnqueueMemsetINTEL_fn _enqueue_memset_fn = nullptr;
    clGetMemAllocInfoINTEL_fn _get_mem_alloc_info_fn = nullptr;
};
/*
UsmPointer requires associated context to free it.
Simple wrapper class for usm allocated pointer.
*/
class UsmHolder {
public:
    // Takes ownership of ptr unless shared_memory is true (externally-owned
    // allocations are never freed here).
    UsmHolder(const cl::UsmHelper& usmHelper, void* ptr, bool shared_memory = false)
        : _usmHelper(usmHelper)
        , _ptr(ptr)
        , _shared_memory(shared_memory) { }

    // Copying would free the same USM pointer twice; instances are shared
    // via std::shared_ptr instead (see UsmMemory).
    UsmHolder(const UsmHolder&) = delete;
    UsmHolder& operator=(const UsmHolder&) = delete;

    void* ptr() { return _ptr; }

    ~UsmHolder() {
        try {
            if (!_shared_memory)
                _usmHelper.free_mem(_ptr);
        } catch (...) {
            // Exception may happen only when clMemFreeINTEL function is unavailable, thus can't free memory properly;
            // never propagate from a destructor.
        }
    }
private:
    const cl::UsmHelper& _usmHelper;
    void* _ptr;
    bool _shared_memory = false;
};
/*
USM base class. Different usm types should derive from this class.
*/
class UsmMemory {
public:
explicit UsmMemory(const cl::UsmHelper& usmHelper) : _usmHelper(usmHelper) { }
UsmMemory(const cl::UsmHelper& usmHelper, void* usm_ptr)
: _usmHelper(usmHelper)
, _usm_pointer(std::make_shared<UsmHolder>(_usmHelper, usm_ptr, true)) {
if (!usm_ptr) {
throw std::runtime_error("[GPU] Can't share null usm pointer");
}
}
// Get methods returns original pointer allocated by openCL.
void* get() const { return _usm_pointer->ptr(); }
void allocateHost(size_t size) {
cl_int error = CL_SUCCESS;
_allocate(_usmHelper.allocate_host(nullptr, size, 0, &error));
if (error != CL_SUCCESS)
detail::errHandler(error, "[CL_EXT] UsmHost in cl extensions constructor failed");
}
void allocateShared(size_t size) {
cl_int error = CL_SUCCESS;
_allocate(_usmHelper.allocate_shared(nullptr, size, 0, &error));
if (error != CL_SUCCESS)
detail::errHandler(error, "[CL_EXT] UsmShared in cl extensions constructor failed");
}
void allocateDevice(size_t size) {
cl_int error = CL_SUCCESS;
_allocate(_usmHelper.allocate_device(nullptr, size, 0, &error));
if (error != CL_SUCCESS)
detail::errHandler(error, "[CL_EXT] UsmDevice in cl extensions constructor failed");
}
virtual ~UsmMemory() = default;
protected:
const UsmHelper& _usmHelper;
std::shared_ptr<UsmHolder> _usm_pointer = nullptr;
private:
void _allocate(void* ptr) {
if (!ptr)
throw std::runtime_error("[CL ext] Can not allocate nullptr for USM type.");
_usm_pointer = std::make_shared<UsmHolder>(_usmHelper, ptr);
}
};
/*
Wrapper for standard cl::Kernel object.
Extend cl::Kernel functionality.
*/
// cl::Kernel extended with USM argument support.  Holds a reference to the
// UsmHelper that owns the extension entry points, so the helper must outlive
// every KernelIntel created from it.
class KernelIntel : public Kernel {
    using Kernel::Kernel;
public:
    explicit KernelIntel(const UsmHelper& usmHelper) : Kernel(), _usmHelper(usmHelper) {}
    KernelIntel(const Kernel &other, const UsmHelper& usmHelper) : Kernel(other), _usmHelper(usmHelper) { }

    // Creates a fresh kernel instance for the same program/function name
    // (useful because kernel argument state is per-instance).
    KernelIntel clone() const {
        Kernel cloned_kernel(this->getInfo<CL_KERNEL_PROGRAM>(), this->getInfo<CL_KERNEL_FUNCTION_NAME>().c_str());
        return KernelIntel(cloned_kernel, _usmHelper);
    }

    // Binds a USM allocation to the given kernel argument slot via
    // clSetKernelArgMemPointerINTEL.
    cl_int setArgUsm(cl_uint index, const UsmMemory& mem) {
        return detail::errHandler(_usmHelper.set_kernel_arg_mem_pointer(*this, index, mem.get()), "[CL_EXT] setArgUsm in KernelIntel failed");
    }
private:
    const UsmHelper& _usmHelper;
};
// Two UsmMemory instances compare equal when they wrap the same underlying
// USM pointer.
inline bool operator==(const UsmMemory &lhs, const UsmMemory &rhs) {
    return lhs.get() == rhs.get();
}

inline bool operator!=(const UsmMemory &lhs, const UsmMemory &rhs) {
    return !(lhs == rhs);
}
} //namespace cl
|
def docs_setup():
    """Silence noisy warnings/loggers and apply matplotlib defaults for docs builds."""
    import logging
    import warnings

    import matplotlib.pyplot as plt

    # Hide deprecation-style noise from rendered notebook output.
    for category in (DeprecationWarning, FutureWarning, UserWarning):
        warnings.filterwarnings("ignore", category=category)

    # Quiet the chatty theano loggers, but keep exoplanet's debug output.
    logging.getLogger("theano.gof.compilelock").setLevel(logging.ERROR)
    logging.getLogger("theano.tensor.opt").setLevel(logging.ERROR)
    logging.getLogger("exoplanet").setLevel(logging.DEBUG)

    # Consistent figure styling across all documentation pages.
    plt.style.use("default")
    plt.rcParams.update(
        {
            "savefig.dpi": 100,
            "figure.dpi": 100,
            "font.size": 16,
            "font.family": "sans-serif",
            "font.sans-serif": ["Liberation Sans"],
            "font.cursive": ["Liberation Sans"],
            "mathtext.fontset": "custom",
        }
    )
|
/**
 * [@enum] MACaptionAppearanceDomain
 * 
 * MACaptionAppearanceDomain is used to specify which domain of preferences to access.
 * 
 * [@constant] kMACaptionAppearanceDomainUser
 * The user-defined preferences. This domain should be passed to retrieve settings that should be used to render captions.
 * [@constant] kMACaptionAppearanceDomainDefault
 * The system defaults which will not change during a user session. These settings should be used for comparison, not to render captions. For example, when rendering a glyph some caption render engines need to get the user-defined font and the system-defined font to determine the final pixel size to render a glyph. Most render engines will never need to use this domain.
 */
@Generated
public final class MACaptionAppearanceDomain {
    @Generated @NInt public static final long Default = 0x0000000000000000L;
    @Generated @NInt public static final long User = 0x0000000000000001L;

    // Constants-only holder; never instantiated.
    @Generated
    private MACaptionAppearanceDomain() {
    }
}
|
/**
 * Get all actions that depend on edges owned by the given network.
 *
 * @param owner The network whose edge dependents are queried.
 * @return A list of every action registered as a dependent of one of the
 *         owner's edges; empty when the owner is unknown.
 */
public ArrayList<Action> GetDependents(Network owner) {
    HashMap<Edge, HashSet<Action>> d = dependents.get(owner);
    if (d == null) {
        return new ArrayList<>(0);
    }
    // Collect directly into an ArrayList: casting the result of
    // Collectors.toList() is unspecified and may throw ClassCastException.
    return d.values().stream()
            .flatMap(Collection::stream)
            .collect(Collectors.toCollection(ArrayList::new));
}
|
Violent clashes broke out near a rally for Donald Trump’s presidential campaign in St Louis on Friday. Thirty-two people were arrested and at least one bloodied person was carried into an ambulance amid demonstrations by supporters and opponents of the Republican frontrunner.
Trump campaign dogged by violent incidents at rallies Read more
Violence has become an increasingly common feature of Trump’s rallies. This week, a Trump supporter sucker-punched a protester being escorted out of a venue by police and Michelle Fields, a reporter for a conservative news site Breitbart, alleged that Trump’s campaign manager assaulted her.
Though Trump has said he discourages violence at his rallies, the Republican frontrunner has encouraged it at times. He recently responded to a protester by asking supporters: “Knock the crap out of him, would you? Seriously, OK, just knock the hell. I promise you I will pay for the legal fees, I promise, I promise.”
The clashes on Friday came as hundreds of people gathered around the Peabody opera house in downtown St Louis, many having been turned away from the over-capacity rally while others were there to protest. Videos posted to social media showed Trump supporters hurling racial slurs and anti-Islamic remarks at protesters and reporters.
St Louis police tackled and took away two men with their hands tied. One, who had a bloodied face, gave his name to MSNBC as Anthony Cage. He was taken into an ambulance, telling police: “I can’t breathe.”
Protesters held signs declaring Trump a racist, and alluding to his remarks that Mexico is deliberately sending “rapists” and “criminals” into the US as well as his support for a wall on the border with Mexico.
Trump supporters shouted back: “There’s nothing racist about him at all.”
There were 32 total arrests, a spokeswoman for the St Louis metropolitan police department said. One person outside the venue was charged with assault and 31 people were charged with general peace disturbance. Of the people arrested, 27 were issued summonses and released, and five were booked.
Facebook Twitter Pinterest Protesters gather outside of Trump’s St Louis rally. Photograph: Michael B. Thomas/AFP/Getty Images
In several places, screaming matches nearly turned into violence. In one case, people dragged a white man away from a black woman after he became furious for having been touched and pushed her back. In another, a white man in a baseball cap gestured “come here” to protesters while shouting: “Do one of y’all motherfuckers want to go? C’mon: bring it! Bring it!”
Elsewhere, police broke up altercations and stray protesters on each side shouted at each other. Slogans included “No room for hate”, “fuck Donald Trump”, “fuck Islam” and “I’ve got 100 bucks for whoever knocks that bitch out”.
St Louis police appear to have been on high alert; recent racially charged protests against police violence in the city and its suburbs clearly colored protests against Trump: “black lives matter” signs and T-shirts were scattered throughout the picket lines.
“All he is, is the KKK in street clothes,” said D’Angelo White, 19 and from St Louis, of Trump. Though the Trump campaign was holding a rally in St Louis, White said, “he don’t give a fuck about nobody here”.
White and three of his friends were wearing blue-and-white starred bandanas over their faces as they walked away from the protest, which they said ended about 2pm. They said they had seen two or three protesters arrested, but the standoff was mostly screaming and tough-talking, not physical confrontation.
White said he was frustrated that Trump supporters kept telling protesters to get a job. He said he worked 40 hours a week for $9.77 an hour. At the protest, he said, “a 12-year-old told me to go get a job”.
“I got called a nigger five times,” said Najda Stroughter, a 20-year-old from St Louis. She said she was shouting “Fuck Trump!” and that men and women shouted “Shut up, nigger!” back at her.
“I’m sure we’ll be on the news, called thugs,” she said.
The surreality of an unusual campaign season also made itself felt: a man in a Spider-Man suit marched alongside Bernie Sanders supporters while a white-robed evangelist in a fisherman’s hat preached near a pro-Trump “Cardinal Cowboy”.
But a few dozen protesters, many of them black St Louis residents, lingered outside the opera house after the rally, engaging in heated but civil debates with a group of white Trump supporters from Jefferson City.
One of those supporters, Rudy Kelsey, 50, walked away from the conversation when one protester shouted that black people cannot be racist.
“I learned that black people still feel very discriminated against,” he said. “I told them racism works both ways, and they said black people cannot be racist, and I said the dialogue’s over when you say that.”
Kelsey told the Guardian he had been discriminated against as a white man, because he was raised Amish.
“My ancestors fled Europe because of persecution,” he said. “My ancestors had their heads chopped off. They were burned at the stake and drowned.”
Growing up, he said, “we drove a horse and buggy instead of a car. We were Amish guys. We were the butt of every joke. My dad, he always raised us to be the best people that we could possibly be. Today I’m a successful self-made millionaire.”
Kelsey said he thought anyone could have that kind of success, “regardless of who you are or what your background is, but they obviously disagree with me very strongly out here. They say it’s still about color.”
While he was wearing a signed Trump hat, however, Kelsey said: “I’m not even sure I’m going to vote for him.” He said he had come to the rally out of curiosity; either way he was “absolutely” going to vote Republican.
In the aftermath of the protest, the only incident approaching violence saw a white protester grab a Make America Great Hat off the head of a Trump supporter, who was able to quickly retrieve it.
“That’s an autographed hat,” another white man said. “You know what I’m saying?”
|
<gh_stars>1-10
package com.rdc.rdcwelcome.utils;
import android.app.Activity;
import android.util.DisplayMetrics;
import android.view.WindowManager;
/**
 * <pre>
 *     author : 残渊
 *     time   : 2018/12/12
 *     desc   : Utility methods for querying screen dimensions.
 * </pre>
 */
public class ScreenUtils {

    /**
     * Returns the screen width in pixels of the activity's default display.
     *
     * @return screen width in pixels
     */
    public static int getScreenWidth(Activity activity) {
        return metricsOf(activity).widthPixels;
    }

    /**
     * Returns the screen height in pixels of the activity's default display.
     *
     * @return screen height in pixels
     */
    public static int getScreenHeight(Activity activity) {
        return metricsOf(activity).heightPixels;
    }

    /** Reads the current display metrics from the activity's window manager. */
    private static DisplayMetrics metricsOf(Activity activity) {
        DisplayMetrics metrics = new DisplayMetrics();
        activity.getWindowManager().getDefaultDisplay().getMetrics(metrics);
        return metrics;
    }
}
|
/** Tests execution and failure reporting of a plain AludraTest test class triggered by a JUnit suite. */
@Test
public void testFailingClass() {
    // Point the suite at the intentionally failing test class, then run it.
    System.setProperty(AludraTestJUnitSuite.SUITE_SYSPROP, FailingTestClass.class.getName());
    Result result = JUnitCore.runClasses(TriggerSuite.class);

    // Exactly one test ran and it failed with the expected message.
    assertEquals(1, result.getRunCount());
    assertEquals(1, result.getFailureCount());
    Failure firstFailure = result.getFailures().get(0);
    assertEquals("This is a failure!", firstFailure.getMessage());
}
|
# Number of feature flags stored in the bitmap, and the current bitmap value.
feature_count = 5
features = 0


def feature_enabled(bitmap, feature_bit):
    """Return True if the given bit index is set in the bitmap."""
    return (bitmap >> feature_bit) & 1 == 1


# Report the state of every feature flag.
for bit in range(feature_count):
    print("feature %s: %s" % (bit, feature_enabled(features, bit)))
|
<reponame>shedaniel/nbt-crafting
package de.siphalor.nbtcrafting.mixin;
import net.minecraft.item.ItemStack;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.injection.At;
import org.spongepowered.asm.mixin.injection.Inject;
import org.spongepowered.asm.mixin.injection.callback.CallbackInfoReturnable;
@Mixin(ItemStack.class)
public class MixinItemStack {
    // Injected at the head of ItemStack.areTagsEqual: when NEITHER stack
    // carries an NBT tag, short-circuit and report the tags as equal.
    // Every other combination falls through to the vanilla comparison.
    @Inject(method = "areTagsEqual", at = @At("HEAD"), cancellable = true)
    private static void areTagsEqual(ItemStack stack1, ItemStack stack2, CallbackInfoReturnable<Boolean> callbackInfoReturnable) {
        if(!stack1.hasTag() && !stack2.hasTag())
            callbackInfoReturnable.setReturnValue(true);
    }
}
|
Media playback is unsupported on your device Media caption Matthew Price on the Jordanian-Syrian border says he has heard "there is a growing humanitarian problem" in Deraa
At least 62 people have been killed across Syria as thousands of anti-government demonstrators rallied in several cities, activists say.
Witnesses and human right groups say many died when troops opened fire on protesters in the city of Deraa, where the unrest began in mid-March.
The security forces swamped the streets of the capital Damascus and tear gas was reportedly fired as prayers ended.
Protests shook a number of other cities including Homs, Latakia and Baniyas.
Witnesses said troops opened fire on villagers marching on Deraa, which has been under military blockade since Monday.
'Soldiers killed'
At least 33 civilians were killed in the southern city, the London-based Syrian Observatory for Human Rights told AFP news agency.
Twenty-seven civilians were killed in and around the northern city of Homs, and two more in the Mediterranean port of Latakia, said the group.
At the scene As the call to prayer rang out at the Umayyad (Great Mosque) in central Damascus, small groups of men gathered. Uniformed security was just visible on the surrounding roads. Once prayers had finished the crowd gathered on the square outside, chanting slogans against President Bashar al-Assad until the imam came out and called for calm. It took just five minutes for the square to empty. But I sensed the tension. One tourist had all his pictures deleted by plain-clothed men, probably officials. I went back to look inside the mosque, but was locked in by the gatekeepers, who released me after a brief exchange of words. But as I walked away I saw men with wooden sticks gathering in the main square. When I tried to take a photo someone hit me on the back of the head and told me to leave. As I left I spotted more small groups of men on the street and police stopping vehicles to check their identity.
Earlier, a medical source told Reuters news agency that 15 bullet-riddled civilians' bodies had been received at a Deraa hospital.
Syrian state media said meanwhile that four soldiers had been killed and two others captured in a "terrorist" attack on their post in Deraa on Friday. The report cannot be verified.
Activists estimate 500 people have died in the crackdown on dissent against the regime of President Bashar al-Assad.
In other developments:
The UN Human Rights Council in Geneva condemned violence by the Syrian authorities, in a US-led resolution by 26 votes to nine with seven abstentions
US officials said Washington would bring sanctions against the Syrian regime, freezing the assets of three top officials
European Union member states reached preliminary agreement to impose an arms embargo against Syria, diplomats said
As has become typical during months of protests in a number of Middle East countries, Friday was billed as a "day of rage" in Syria.
Tear gas was fired at protesters in the old Midan area of Damascus, as demonstrations also flared in the city's suburbs.
Image caption Protests have also been reported in the city of Qamishli in Syria's north
A BBC reporter in the capital, who cannot be named for security reasons, said a crowd gathered to shout anti-regime slogans after prayers at the city's Great Mosque.
An imam called for calm and the men dispersed. Our correspondent later saw unidentified men wielding wooden sticks in the main square and police stopping vehicles to check passengers' identities.
Demonstrations also erupted in the northern cities of Aleppo, Idlib, Deir Ezzor, Raqqah and Qamishli; the coastal cities of Baniyas and Tartous; the southern city of Suwayda and the western city of Hama.
There are unconfirmed reports of security forces shooting protesters dead in Homs and Latakia.
Hundreds of people in a northern Jordanian town, next to Syria, demonstrated in support of the protesters just over the border in Deraa, Jordanian police said.
'Fleeing over border'
More than 200 Syrians fleeing the security clampdown crossed the border into Turkey on Friday, Turkish media reported.
Ahead of the protests, a notice on the Facebook page Syrian Revolution 2011 called for a "Friday of Anger".
"To the youths of the revolution, tomorrow we will be in all the places, in all the streets," it said.
Protests were backed this week by the Muslim Brotherhood, the Islamist organisation crushed by President Assad's father in the 1980s and banned ever since.
"Chant with one voice for freedom and dignity," said a statement, thought to be the first direct appeal by the group. "Do not allow the tyrant to enslave you."
In Deraa, dozens of people are reported to have been shot dead in recent days.
Witnesses said water, communications and power had been cut off.
There are unconfirmed reports of divisions within the security forces and of soldiers refusing orders to fire on protesters.
On Wednesday, 200 members of Syria's ruling Baath party resigned after issuing an angry public statement denouncing the repression.
The resignations - mostly from around Deraa - follow those of 30 Baath officials from the city of Baniyas.
|
/**
 * Insert/Update/Delete record in the database.
 * This method should be used when one of the columns in the database is a spatial column.
 *
 * @param sql SQL with one positional parameter per geometry in {@code geometryList}
 * @param geometryList geometries bound, in order, to the statement parameters
 * @return 1 if at least one row was affected, 0 otherwise
 * @throws SQLException if statement preparation or execution fails
 */
public int insertUpdateOrDeleteData(String sql, ArrayList<JGeometry> geometryList) throws java.sql.SQLException {
    Connection conn = getConnection();
    try (PreparedStatement ps = conn.prepareStatement(sql)) {
        for (int i = 0; i < geometryList.size(); ++i) {
            STRUCT obj = JGeometry.store(geometryList.get(i), conn);
            ps.setObject(i + 1, obj);
        }
        // executeUpdate() is the correct call for DML. The old code used
        // execute(), which returns true only when the result is a ResultSet,
        // so INSERT/UPDATE/DELETE were always reported as failures.
        int rows = ps.executeUpdate();
        return rows > 0 ? 1 : 0;
    } finally {
        // Release the connection even when binding or execution throws.
        freeConnection(conn);
    }
}
|
/**
 * Helper method to generate an instance of a subclass of
 * {@link LoadInstruction} based on the specified {@link Type} that will
 * load the specified local variable.
 * @param index the JVM stack frame index of the variable that is to be
 * loaded
 * @param type the {@link Type} of the variable
 * @return the generated {@link LoadInstruction}
 */
private static Instruction loadLocal(int index, Type type) {
    // boolean, int, short, byte and char are all represented as ints in
    // JVM stack frames, so they share the ILOAD opcode.
    if (type == Type.BOOLEAN || type == Type.INT || type == Type.SHORT
            || type == Type.BYTE || type == Type.CHAR) {
        return new ILOAD(index);
    } else if (type == Type.LONG) {
        return new LLOAD(index);
    } else if (type == Type.FLOAT) {
        return new FLOAD(index);
    } else if (type == Type.DOUBLE) {
        return new DLOAD(index);
    } else {
        // Everything else is a reference type.
        return new ALOAD(index);
    }
}
|
// input() won't deal with GPU code, so it can be synchronous
// Handles a single GLFW window event; always returns false (the event is
// never reported as consumed by this handler).
fn input(&mut self, window: &mut glfw::Window, event: &glfw::WindowEvent) -> bool {
    // Forward events to the imgui backend only while its overlay is visible.
    if self.imgui.should_render {
        self.imgui.platform.handle_event(self.imgui.context.io_mut(), &window, &event);
    }
    match event {
        // Escape requests application shutdown.
        glfw::WindowEvent::Key(Key::Escape, _, Action::Press, _) => {
            window.set_should_close(true)
        },
        // F3 toggles the imgui overlay on/off.
        glfw::WindowEvent::Key(Key::F3, _, Action::Press, _) => {
            self.imgui.should_render = !self.imgui.should_render;
        },
        // Window resized: propagate the new dimensions.
        glfw::WindowEvent::Size(w, h) => {
            self.resize((*w, *h));
        },
        _ => {}
    }
    false
}
|
/**
 * Parse one or more variable references. Returns null if an error occurred.
 *
 * Accepts a variable optionally followed by repeated (delimiter, variable)
 * pairs. A delimiter with nothing after it, a pipe after a delimiter, or a
 * double pipe following the list are all treated as syntax errors.
 */
private Variables parseVariables() throws CodeSyntaxException {
    // The sequence must begin with a variable token.
    if (!matcher.variable()) {
        return null;
    }
    StringView token = matcher.consume();
    Variables vars = new Variables(token.repr());
    // Consume each further "delimiter variable" pair.
    while (matcher.variablesDelimiter()) {
        matcher.consume();
        // Trailing delimiter with no variable after it is malformed.
        if (matcher.finished()) {
            return null;
        }
        // A pipe, or anything that is not a variable, is malformed here.
        if (matcher.pipe() || !matcher.variable()) {
            return null;
        }
        vars.add(matcher.consume().repr());
    }
    // Reject a double pipe ("||") immediately after the variable list.
    boolean matchedPipe = matcher.peek(0, '|');
    if (matchedPipe && matcher.peek(1, '|')) {
        return null;
    }
    return vars;
}
|
/*
* Copyright (c) 2017 <NAME>
* Copyright (c) 2017 <NAME>
* Copyright (c) 2017 <NAME>
* Copyright (c) 2017 <NAME>
* Copyright (c) 2017 <NAME>
* Copyright (c) 2017 <NAME>
* Copyright (c) 2017 <NAME>
* Copyright (c) 2017 <NAME>
*
* This software may be modified and distributed under the terms of the MIT license. See the LICENSE file for details.
*/
package com.dp16.runamicghent.util;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.Assert.fail;
/**
 * Helper class for some common threading problems encountered in unit tests.
 * Created by Nick on 28-3-2017.
 */
public class ThreadingUtils {

    /**
     * Waits until an atomic variable has a certain value or fails with the message
     * "-failMessage- -timeout- ms" after a timeout.
     *
     * @param waitTimePerIteration How long the thread should sleep each waiting iteration (in ms)
     * @param maxIterations        How many waiting iterations there should be
     * @param failMessage          Message to pass to junit.fail() upon timeout
     * @param atomic               Variable that must be waited upon
     * @param expectedValue        Value above variable should be.
     */
    public static void waitUntilAtomicVariableReachesValue(int waitTimePerIteration, int maxIterations, String failMessage, AtomicInteger atomic, int expectedValue) {
        // Poll the variable maxIterations + 1 times, sleeping between polls.
        for (int iteration = 0; iteration <= maxIterations; iteration++) {
            if (atomic.get() == expectedValue) {
                return;
            }
            try {
                Thread.sleep(waitTimePerIteration);
            } catch (InterruptedException e) {
                // Preserve the interrupt status for callers.
                Thread.currentThread().interrupt();
            }
        }
        // Timed out without ever observing the expected value.
        fail(failMessage + waitTimePerIteration * maxIterations + " ms");
    }

    /**
     * Waits one second until an atomic variable has a certain value or fails with the message "-failMessage- 1000 ms" after a timeout.
     * <p>
     * Uses {@link #waitUntilAtomicVariableReachesValue(int, int, String, AtomicInteger, int)} internally with the first two arguments to 25 and 40.
     *
     * @param failMessage   Message to pass to junit.fail() upon timeout
     * @param atomic        Variable that must be waited upon
     * @param expectedValue Value above variable should be.
     */
    public static void waitOneSecUntilAtomicVariableReachesValue(String failMessage, AtomicInteger atomic, int expectedValue) {
        waitUntilAtomicVariableReachesValue(25, 40, failMessage, atomic, expectedValue);
    }
}
|
/**
 * Allows Display to process events, so UI can make progress. Tests running
 * in the UI thread may need to call this to avoid UI deadlocks.
 * <p>
 * For convenience, it is allowed to call this method from a non UI thread,
 * but such calls have no effect.
 */
public static void waitForDisplay() {
    if (!inUIThread()) {
        return;
    }
    // Drain the SWT event queue until no more work is pending.
    while (Display.getDefault().readAndDispatch()) {
        // readAndDispatch performs the work; nothing to do in the body.
    }
}
|
/**
 * Will get all documents with missing ancestry information, and correct them
 * excluding documents in the republished state
 */
private void processAssignAncestry() {
    // Runs with elevated privileges against the default repository.
    CoreInstance
        .doPrivileged(Framework.getService(RepositoryManager.class).getDefaultRepositoryName(),
            session -> {
                // NXQL: words, phrases and books with no dialect ancestor set,
                // skipping republished, trashed, proxy and version documents.
                String query = "SELECT * FROM FVWord, FVPhrase, FVBook WHERE "
                    + " fva:dialect IS NULL AND "
                    + " ecm:currentLifeCycleState NOT LIKE 'Republish' AND "
                    + " ecm:isTrashed = 0 AND "
                    + " ecm:isProxy = 0 AND "
                    + " ecm:isVersion = 0";
                // NOTE(review): only the first page (up to 1000 docs) is
                // processed per invocation — presumably the job is rerun
                // until no matches remain; confirm with the scheduler.
                long pageSize = 1000;
                DocumentModelList documents = session.query(query, null, pageSize, 0, true);
                if (documents == null || documents.isEmpty()) {
                    return;
                }
                log.warning(
                    () -> "GLOBAL JOB: Found " + documents.totalSize()
                        + " docs to fix ancestry.");
                populateAncestryAndPublish(session, documents);
                // Persist all changes made while populating ancestry.
                session.save();
            });
}
|
package token
import (
"context"
"database/sql"
"fmt"
"github.com/google/uuid"
"github.com/vardius/go-api-boilerplate/pkg/commandbus"
"github.com/vardius/go-api-boilerplate/pkg/errors"
"github.com/vardius/go-api-boilerplate/pkg/executioncontext"
oauth2 "gopkg.in/oauth2.v3"
)
// Remove is the command for deleting a token by its ID.
type Remove struct {
	ID uuid.UUID
}

// GetName returns the command name (the concrete Go type name).
func (c Remove) GetName() string {
	return fmt.Sprintf("%T", c)
}
// OnRemove creates the command handler for the Remove command.
func OnRemove(repository Repository, db *sql.DB) commandbus.CommandHandler {
	fn := func(ctx context.Context, c Remove, out chan<- error) {
		// This goroutine runs independently of the request's goroutine,
		// so recover middlewares will not catch a panic here; recover
		// locally to prevent a crash.
		defer recoverCommandHandler(out)
		token := repository.Get(c.ID)
		err := token.Remove()
		if err != nil {
			out <- errors.Wrap(err, errors.INTERNAL, "Error when removing token")
			return
		}
		// NOTE(review): Save uses context.Background() rather than the
		// incoming ctx — cancellation does not propagate; confirm intended.
		out <- repository.Save(executioncontext.WithFlag(context.Background(), executioncontext.LIVE), token)
	}
	return commandbus.CommandHandler(fn)
}
// Create is the command for creating a token from OAuth2 token info.
type Create struct {
	TokenInfo oauth2.TokenInfo
}

// GetName returns the command name (the concrete Go type name).
func (c Create) GetName() string {
	return fmt.Sprintf("%T", c)
}
// OnCreate creates the command handler for the Create command.
func OnCreate(repository Repository, db *sql.DB) commandbus.CommandHandler {
	fn := func(ctx context.Context, c Create, out chan<- error) {
		// This goroutine runs independently of the request's goroutine,
		// so recover middlewares will not catch a panic here; recover
		// locally to prevent a crash.
		defer recoverCommandHandler(out)
		id, err := uuid.NewRandom()
		if err != nil {
			out <- errors.Wrap(err, errors.INTERNAL, "Could not generate new id")
			return
		}
		token := New()
		err = token.Create(id, c.TokenInfo)
		if err != nil {
			out <- errors.Wrap(err, errors.INTERNAL, "Error when creating new token")
			return
		}
		// NOTE(review): Save uses context.Background() rather than the
		// incoming ctx — cancellation does not propagate; confirm intended.
		out <- repository.Save(executioncontext.WithFlag(context.Background(), executioncontext.LIVE), token)
	}
	return commandbus.CommandHandler(fn)
}
// recoverCommandHandler converts a panic inside a command handler goroutine
// into an error on the out channel, preventing a process crash.
func recoverCommandHandler(out chan<- error) {
	if r := recover(); r != nil {
		out <- errors.Newf(errors.INTERNAL, "[CommandHandler] Recovered in %v", r)
	}
}
|
.
A variant of a diurnal-nocturnal inpatient complex on the basis of a psychoneurological dispensary involving a short period of round-the-clock hospitalization of patients is described. This proved advisable in the treatment of three groups of patients (n = 260): (1) those with acute manifestations of the disease (to carry out intensive therapy), (2) those with subacute paroxysms and prolonged psychotic states (to conduct arresting therapy), (3) with conflicts in the microsocial environment (with the aim of their temporary isolation). Such an organizational form makes it possible to expand intensive therapy and increase a number of patients with acute psychotic manifestations treated outside the settings of a closed hospital.
|
// StartupForwardListener is called when a forward tunnel is to
// be listened for. It binds a local TCP listener and, in a background
// goroutine, forwards each accepted connection through the SSH client.
func (cfg *SshegoConfig) StartupForwardListener(ctx context.Context, sshClientConn *ssh.Client) error {
	p("sshego: StartupForwardListener: about to listen on %s\n", cfg.LocalToRemote.Listen.Addr)
	ln, err := net.ListenTCP("tcp", &net.TCPAddr{IP: net.ParseIP(cfg.LocalToRemote.Listen.Host), Port: int(cfg.LocalToRemote.Listen.Port)})
	if err != nil {
		return fmt.Errorf("could not -listen on %s: %s", cfg.LocalToRemote.Listen.Addr, err)
	}
	go func() {
		for {
			p("sshego: about to accept on local port %s\n", cfg.LocalToRemote.Listen.Addr)
			// Re-arm a 10s accept deadline each loop so Accept wakes up
			// periodically instead of blocking forever.
			timeoutMillisec := 10000
			err = ln.SetDeadline(time.Now().Add(time.Duration(timeoutMillisec) * time.Millisecond))
			panicOn(err)
			fromBrowser, err := ln.Accept()
			if err != nil {
				// A *net.OpError here is presumably the deadline expiring;
				// loop and try again. Any other error is fatal.
				// NOTE(review): this also swallows non-timeout OpErrors —
				// confirm whether err.(net.Error).Timeout() was intended.
				if _, ok := err.(*net.OpError); ok {
					continue
				}
				p("ln.Accept err = '%s' aka '%#v'\n", err, err)
				panic(err)
			}
			if !cfg.Quiet {
				log.Printf("sshego: accepted forward connection on %s, forwarding --> to sshd host %s, and thence --> to remote %s\n", cfg.LocalToRemote.Listen.Addr, cfg.SSHdServer.Addr, cfg.LocalToRemote.Remote.Addr)
			}
			// Hand the accepted connection off to the forwarding machinery.
			NewForward(ctx, cfg, sshClientConn, fromBrowser)
		}
	}()
	return nil
}
|
<reponame>jingetiema2100/MicroCommunity<gh_stars>100-1000
package com.java110.goods.bmo.groupBuy;
import com.java110.po.groupBuy.GroupBuyPo;
import org.springframework.http.ResponseEntity;
public interface ISaveGroupBuyBMO {

    /**
     * Persists a group-buy purchase record.
     * add by wuxw
     *
     * @param groupBuyPo the group-buy purchase data to save
     * @return HTTP response describing the outcome of the save
     */
    ResponseEntity<String> save(GroupBuyPo groupBuyPo);
}
|
#include <bits/stdc++.h>
const int maxn = 200;
using namespace std;

int main(){
#if LOCAL
    freopen("1.txt","r",stdin);
#endif // LOCAL
    // n: sequence length; k: number of replacement values for the zeros.
    int n, k;
    scanf("%d %d", &n, &k);
    int a[maxn], b[maxn];
    for (int i = 0; i < n; i++) {
        scanf("%d", &a[i]);
    }
    for (int i = 0; i < k; i++) {
        scanf("%d", &b[i]);
    }

    // Greedily fill each zero slot with the largest unused value.
    sort(b, b + k, greater<int>());
    int used = 0;
    for (int i = 0; i < n; i++) {
        if (a[i] == 0) {
            a[i] = b[used++];
        }
    }

    // Scan for the first non-increase; if none exists the filled sequence
    // is strictly increasing and the answer is "No", otherwise "Yes".
    int s = 0;
    while (s < n - 1 && a[s] < a[s + 1]) {
        s++;
    }
    printf("%s\n", s == n - 1 ? "No" : "Yes");
    return 0;
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jackrabbit.oak.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.Random;
import org.junit.Test;
/**
 * Unit tests for {@code ApproximateCounter}, which trades exactness for
 * fewer writes by probabilistically returning a (scaled) offset instead of
 * every increment. Each test seeds the counter's RNG so runs are repeatable.
 */
public class ApproximateCounterTest {

    /** With resolution <= 1 the counter must be exact: the offset equals the input. */
    @Test
    public void veryHighResolution() {
        for (int i = -100; i < 100; i++) {
            assertEquals(i, ApproximateCounter.calculateOffset(i, -1));
            assertEquals(i, ApproximateCounter.calculateOffset(i, 0));
            assertEquals(i, ApproximateCounter.calculateOffset(i, 1));
        }
    }

    /** At resolution 1000, almost all calls return 0 but the running sum stays near the true count. */
    @Test
    public void regularResolution() {
        ApproximateCounter.setSeed(0);
        long result = 0;
        long count = 100000;
        int nonZero = 0;
        for (long i = 0; i < count; i++) {
            long offset = ApproximateCounter.calculateOffset(1, 1000);
            if (offset != 0) {
                nonZero++;
                result += offset;
            }
        }
        // most of the time, 0 needs to be returned
        assertTrue(nonZero < count / 500);
        // the expected result is within a certain range
        assertTrue(Math.abs(result - count) < count / 10);
    }

    /** Mixed additions and removals: the approximate sum tracks the exact sum within tolerance. */
    @Test
    public void addRemove() {
        ApproximateCounter.setSeed(0);
        Random r = new Random(1);
        long result = 0;
        long exactResult = 0;
        long count = 100000;
        long sumChange = 0;
        int nonZero = 0;
        for (long i = 0; i < count; i++) {
            // Random deltas in [-10, 9].
            int o = r.nextInt(20) - 10;
            exactResult += o;
            sumChange += Math.abs(o);
            long offset = ApproximateCounter.calculateOffset(o, 1000);
            if (offset != 0) {
                nonZero++;
                result += offset;
            }
        }
        // most of the time, 0 needs to be returned
        assertTrue(nonZero < count / 50);
        // the expected result is within a certain range
        assertTrue(Math.abs(result - exactResult) < sumChange / 10);
    }

    /** Low resolution with adjustOffset applied: still sparse updates, still close to the true count. */
    @Test
    public void lowResolution() {
        ApproximateCounter.setSeed(0);
        long result = 0;
        long count = 100000;
        int nonZero = 0;
        for (long i = 0; i < count; i++) {
            long offset = ApproximateCounter.calculateOffset(1, 100);
            if (offset != 0) {
                offset = ApproximateCounter.adjustOffset(result, offset, 100);
            }
            if (offset != 0) {
                nonZero++;
                result += offset;
            }
        }
        // most of the time, 0 needs to be returned
        assertTrue(nonZero < count / 500);
        // the expected result is within a certain range
        assertTrue(Math.abs(result - count) < count / 10);
    }

    /** adjustOffset must never let the stored count go below zero. */
    @Test
    public void keepAboveZero() {
        // adjustOffset ensures that the resulting count is larger or equal to 0
        assertEquals(1234, ApproximateCounter.adjustOffset(-1234, -100, 10));
    }

    /** adjustOffset with resolution 1 must pass the offset through unchanged. */
    @Test
    public void highResolutionAdjust() {
        // adjustOffset with resolution of 1 should not affect the result
        for (int i = 0; i < 10; i++) {
            assertEquals(123, ApproximateCounter.adjustOffset(i, 123, 1));
        }
    }
}
|
def n_step_return(self, time, n_step):
    """Return the n-step return starting at ``time``.

    If ``time + n_step`` is still inside the trajectory, the result is the
    reward at that step plus the weight keyed by the trajectory entry at
    that step (a bootstrapped value); otherwise the final reward is used.
    """
    step = time + n_step
    if step >= self.length:
        return self.reward_list[-1]
    return self.reward_list[step] + self.weight[self.trajectory_list[step]]
|
<reponame>UGEL4/FakeReal
#pragma once
namespace FakeReal {
	class RenderAPI;
	class VertexArray;

	// Static facade over the active RenderAPI backend. All rendering state
	// changes and draw calls go through these wrappers.
	class RenderCommand
	{
	public:
		// Creates / destroys the underlying RenderAPI instance.
		static void Init();
		static void Release();
		// Frame clearing.
		static void SetClearColor(float r, float g, float b, float a);
		static void Clear();
		static void SetViewPort(int x, int y, int width, int height);
		// Issues an indexed draw for the given vertex array.
		static void DrawIndex(const VertexArray* pVa, unsigned int count);
		static bool IsInited();
		// Depth testing configuration.
		static void SetDepthFunc(unsigned int DepthFuncEnum);
		static void EnableDepthTest(bool enable);
		static void EnableStencilTest(bool enable);
		static void DepthMask(bool mask);
		// Stencil testing configuration.
		static void StencilMask(unsigned int Mask);
		static void SetStencilTestFunc(unsigned int FuncEnum, int RefValue, unsigned int Mask);
		static void SetStencilTestOperator(unsigned int StencilFailFuncEnum, unsigned int DepthFailFuncEnum, unsigned int AllPassFuncEnum);

	private:
		// Static-only class; never instantiated.
		RenderCommand() {}
		static RenderAPI* ms_pRenderAPI;
	};
}
|
<filename>eventx/event.go
package eventx
import "reflect"
// EventCall is the callback invoked when a listened-for event fires.
type EventCall func(evd *EventData)

// Equal reports whether two callbacks refer to the same underlying function,
// compared via their code pointers. The original also compared &c == &c1,
// but the addresses of two distinct parameters are never equal, so that
// term was dead and has been removed.
func (c EventCall) Equal(c1 EventCall) bool {
	return reflect.ValueOf(c).Pointer() == reflect.ValueOf(c1).Pointer()
}
// EventData is the payload delivered to event listeners.
// @author xuzhuoxi
// Created on 2019/01/08.
type EventData struct {
	// EventType is the type (name) of the event.
	EventType string
	// Data is the payload passed along with the event.
	Data interface{}
	// CurrentTarget is the current target of the event,
	// supplied when the event is dispatched.
	CurrentTarget interface{}
	// CurrentDispatcher is the dispatcher that generated the event —
	// the owner of the DispatchEvent call, possibly an embedded object.
	CurrentDispatcher IEventDispatcher
	// stopped records whether propagation has been halted.
	stopped bool
}
// Stopped reports whether propagation of this event has been stopped.
func (ed *EventData) Stopped() bool {
	return ed.stopped
}

// StopImmediatePropagation prevents listeners in the current node and all
// subsequent nodes of the event flow from being processed.
func (ed *EventData) StopImmediatePropagation() {
	ed.stopped = true
}
// NewEventDispatcher creates an empty EventDispatcher.
func NewEventDispatcher() *EventDispatcher {
	return &EventDispatcher{}
}
// IEventDispatcher is the contract for an event dispatcher: registering,
// removing and triggering listeners by event type.
type IEventDispatcher interface {
	// AddEventListener registers a listener.
	//   eventType: the event type
	//   call:      the listener function
	AddEventListener(eventType string, call EventCall)
	// OnceEventListener registers a listener that is invoked only once.
	OnceEventListener(eventType string, call EventCall)
	// RemoveEventListener removes a previously registered listener.
	//   eventType: the event type
	//   call:      the listener function
	RemoveEventListener(eventType string, call EventCall)
	// RemoveEventListenerByType removes every listener of one event type.
	RemoveEventListenerByType(eventType string)
	// RemoveEventListeners removes all listeners of all event types.
	RemoveEventListeners()
	// DispatchEvent triggers all listeners of eventType, passing data.
	//   eventType:     the event type
	//   currentTarget: the current target object
	//   data:          event payload (may be nil)
	DispatchEvent(eventType string, currentTarget interface{}, data interface{})
}
// EventDispatcher is the default IEventDispatcher implementation, mapping
// event type to its listener delegate. The map is created lazily, so the
// zero value is usable.
type EventDispatcher struct {
	dMap map[string]*_EventDelegate
}
// AddEventListener registers a listener for eventType.
func (e *EventDispatcher) AddEventListener(eventType string, call EventCall) {
	e.getDelegate(eventType).AddListener(call)
}

// OnceEventListener registers a listener that is invoked only once.
func (e *EventDispatcher) OnceEventListener(eventType string, call EventCall) {
	e.getDelegate(eventType).OnceListener(call)
}
// RemoveEventListener removes a previously registered listener; it is a
// no-op if no listener of eventType exists.
func (e *EventDispatcher) RemoveEventListener(eventType string, call EventCall) {
	if !e.hasType(eventType) {
		return
	}
	e.getDelegate(eventType).RemoveListener(call)
}

// RemoveEventListenerByType removes every listener of one event type.
func (e *EventDispatcher) RemoveEventListenerByType(eventType string) {
	if !e.hasType(eventType) {
		return
	}
	e.getDelegate(eventType).RemoveListeners()
}
// RemoveEventListeners drops all listeners of all event types by discarding
// the whole delegate map.
func (e *EventDispatcher) RemoveEventListeners() {
	e.dMap = nil
}

// DispatchEvent triggers all listeners of eventType, delivering an
// EventData built from the given target and payload. No-op when no
// listener of eventType is registered.
func (e *EventDispatcher) DispatchEvent(eventType string, currentTarget interface{}, data interface{}) {
	if !e.hasType(eventType) {
		return
	}
	d := &EventData{EventType: eventType, Data: data, CurrentTarget: currentTarget, CurrentDispatcher: e}
	e.getDelegate(eventType).Handle(d)
}
// hasType reports whether a delegate is registered for eventType.
func (e *EventDispatcher) hasType(eventType string) bool {
	// Indexing a nil map is safe in Go and yields the zero value, so the
	// previous explicit nil check was unnecessary.
	_, ok := e.dMap[eventType]
	return ok
}
// getDelegate returns the delegate for eventType, creating the map and/or the
// delegate on first use. It performs a single map lookup instead of the
// original hasType + comma-ok pair.
func (e *EventDispatcher) getDelegate(eventType string) *_EventDelegate {
	if e.dMap == nil {
		e.dMap = make(map[string]*_EventDelegate)
	}
	d, ok := e.dMap[eventType]
	if !ok {
		d = &_EventDelegate{EventType: eventType}
		e.dMap[eventType] = d
	}
	return d
}
|
The Neurophysiology of Acupuncture: A Viewpoint
Acupuncture is a specialised sensory stimulation that is analysed through sensory neural pathways. Therefore to understand its action we have to analyse the anatomy, physiology and pharmacology of the nervous system, aided with a knowledge of neuroendocrinology and the chemoarchitecture of the brain. Various neural theories have been developed to explain the mechanisms of acupuncture. It is now evident that acupuncture reacts at local, regional (spinal cord) and general (brain) levels. Therefore, inserting one or more needles at particular points (or areas) of the body activates neural pathways on three different levels, provoking local, regional, and general reactions. The local reaction is a multifactorial phenomenon. The electric injury potential due to the needle, the presence and synthesis of opioid peptides at the site of injury, and substance P, histamine-like substances, bradykinin, serotonin and proteolytic enzyme release around the needle, all occur during needling. The regional reaction concerns the activation of a larger area (2–3 dermatomes) via reflex arcs. We can analyse the viscero-cutaneous, cutaneo-visceral, cutaneo-muscular and viscero-muscular reflexes and also the vegetative, stretch and polysynaptic segmental reflexes. The general reaction mainly activates the brain central mechanism of internal homoeostasis. We can explain the action of acupuncture in acute and chronic pain syndromes, in addiction and in psychiatric disease through the role of central neurotransmitters and the modulatory systems that are activated by acupoints: opioid, non-opioid and central sympathetic inhibitory mechanisms.
|
/* Gets the running process id of the process named processName, or 0 when it
 * is not running or the snapshot could not be created.
 * Fixes two issues in the original: the return value of Process32First was
 * ignored, and the name-comparison logic was duplicated before the loop. */
DWORD MinecraftAppLauncher::GetProcessId(const std::wstring& processName)
{
	PROCESSENTRY32 processInfo;
	processInfo.dwSize = sizeof(processInfo);

	HANDLE processesSnapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, NULL);
	if (processesSnapshot == INVALID_HANDLE_VALUE)
		return 0;

	DWORD pid = 0;
	// Process32First/Process32Next visit each process exactly once, so a
	// single loop covers the first entry as well.
	for (BOOL ok = Process32First(processesSnapshot, &processInfo); ok;
	     ok = Process32Next(processesSnapshot, &processInfo))
	{
		if (!processName.compare(processInfo.szExeFile))
		{
			pid = processInfo.th32ProcessID;
			break;
		}
	}

	CloseHandle(processesSnapshot);
	return pid;
}
|
def go_to_posture(self, posture: str, speed: int = 100) -> None:
    """Ask the robot to assume ``posture`` at ``speed`` percent.

    Speeds outside the inclusive range 1-100 fall back to 100.
    """
    effective_speed = speed if 1 <= speed <= 100 else 100
    self.__send('action_posture', posture + ';' + str(effective_speed))
|
The effect of substituents on the strain energies of small ring compounds.
The effect of substituents on the strain energy (SE) of cyclic molecules is examined at the CBS, G2, and G2(MP2) levels of theory. Alkyl substituents have a meaningful effect upon the SE of small ring compounds. gem-Dimethyl substitution lowers the strain energy of cyclopropanes, cyclobutanes, epoxides, and dimethyldioxirane (DMDO) by 6-10 kcal/mol relative to an unbranched acyclic reference molecule. The choice of the reference compound is especially important for geminal electronegative substituents. The SE of 1,1-difluorocyclopropane is estimated to be 20.5 kcal/mol relative to acyclic reference molecule 1,3-difluoropropane but is 40.7 kcal/mol with respect to the thermodynamically more stable (DeltaE = -20.2 kcal/mol) isomeric reference compound 2,2-difluoropropane. The SE of dioxirane (DO) is estimated to be approximately 18 kcal/mol while the SE of DMDO is predicted to be approximately equal to 11 kcal/mol by using homodesmotic reactions that maintain a balanced group equivalency. The total energy (CBS-APNO) of DMDO is 2.6 kcal/mol lower than that of isomeric 1,2-dioxacyclopentane that has an estimated SE of 5 kcal/mol. The thermodynamic stability of DMDO is a consequence of its relatively strong C-H (BDE = 102.7 kcal/mol) and C-CH(3) (BDE = 98.9 kcal/mol) bonds. By comparison, the calculated sec-C-H and -C-CH(3) G2 bond dissociation energies in propane are 100.3 and 90.5 kcal/mol.
|
/**
 * validates nested peakStructure. must have a parent peak with an atomRefs
 * attribute of the same size as the number of children of this.
 *
 * @return true if has children of same size as parent atomRefs
 */
public boolean hasValidNestedPeakStructure() {
    CMLElements<CMLPeakStructure> ps = this.getPeakStructureElements();
    // The original fetched and size-checked the peak structures twice;
    // early returns keep a single fetch and flatten the nesting.
    if (ps.size() == 0) {
        return false;
    }
    Node parent = this.getParent();
    if (!(parent instanceof CMLPeak)) {
        return false;
    }
    String[] atomRefs = ((CMLPeak) parent).getAtomRefs();
    return atomRefs != null && ps.size() == atomRefs.length;
}
|
# Read n (x, y) pairs, split them by the sign of x, and pair up positives
# (closest first) with negatives (closest first, i.e. from the right end of
# the ascending sort). One extra unmatched item may contribute its y.
n = int(input())
positives = []
negatives = []
for _ in range(n):
    x, y = map(int, input().split())
    if x > 0:
        positives.append((x, y))
    else:
        negatives.append((x, y))
positives.sort()
negatives.sort()
pairs = min(len(positives), len(negatives))
total = 0
for i in range(pairs):
    total += positives[i][1]
    total += negatives[-i - 1][1]
if len(positives) > pairs:
    total += positives[pairs][1]
elif len(negatives) > pairs:
    total += negatives[-pairs - 1][1]
print(total)
|
#include <bits/stdc++.h>
using namespace std;

char str[20];

int main(int argc, char const *argv[]) {
	scanf("%s", str + 1);
	int len = strlen(str + 1);
	// Count mismatched character pairs mirrored around the centre.
	int l = 1, r = len;
	int ans = 0;
	while (l <= r)
		ans += str[l++] != str[r--];
	// Exactly one change yields a palindrome iff there is exactly one
	// mismatch, or none but the length is odd (the middle char can change).
	// Parentheses make the && / || precedence explicit; the original relied
	// on implicit precedence and triggered -Wparentheses.
	puts(ans == 1 || (ans == 0 && (len & 1)) ? "YES" : "NO");
	return 0;
}
|
//
// Listen for messages on the usart channel and send them over the socket to
// be shown in the terminal
//
func uartTxComms(theChannel chan uint, conn net.Conn) {
var buf = []byte{0}
for {
v := <-theChannel
buf[0] = byte(v)
_, _ = conn.Write([]byte(buf))
}
}
|
#include <iostream>
#include <map>
using namespace std;

// Read n integers and print the value that reaches the highest frequency
// first (ties are won by the value that hit the maximum count earliest).
int main() {
	int n;
	cin >> n;
	map<int, int> freq;
	int bestValue = 0;
	int bestCount = 0;
	while (n--) {
		int v;
		cin >> v;
		int c = ++freq[v];
		if (c > bestCount) {
			bestCount = c;
			bestValue = v;
		}
	}
	cout << bestValue;
}
|
import java.util.Scanner;

// Prints the 1-based index of the single number whose parity differs from
// all the others.
public class x25A {
	public static void main(String[] args) {
		Scanner sc = new Scanner(System.in);
		int n = sc.nextInt();
		int oddCount = 0, evenCount = 0;
		int lastOddIndex = 0, lastEvenIndex = 0;
		for (int i = 1; i <= n; i++) {
			int value = sc.nextInt();
			if (value % 2 == 1) {
				oddCount++;
				lastOddIndex = i;
			}
			if (value % 2 == 0) {
				evenCount++;
				lastEvenIndex = i;
			}
		}
		System.out.println(oddCount == 1 ? lastOddIndex : lastEvenIndex);
	}
}
|
Image caption The prime minister is keen to make London an international centre of Islamic finance
Prime Minister David Cameron has announced that the UK will become the first non-Muslim country to issue an Islamic bond.
At the World Islamic Economic Forum (WIEF) in London, he also announced plans for a new Islamic index on the London Stock Exchange.
It is intended to attract more Islamic investors by making it clearer which companies match their criteria.
Under Islamic rules, no interest on business dealings can be charged.
Transactions must also be based on a real trade or business activities and they must not involve anything forbidden under Sharia law, such as gambling or alcohol.
Mr Cameron believes that Islamic finance is a good opportunity for the UK's financial services industry as Islamic investments have soared by 150% in the past seven years and are expected to be worth £1.3 trillion next year.
Media playback is unsupported on your device Media caption David Slater: 'It's not about politics or religion - it's about opportunity'
"I don't just want London to be a great capital of Islamic finance in the Western world, I want London to stand alongside Dubai as one of the great capitals of Islamic finance anywhere in the world," he told delegates of the WIEF.
"There are some countries which naturally look inwards, pull up the drawbridge and refuse to recognise that the way the world is changing affects their future success. But Britain will not make that mistake," he added.
The Treasury hopes to launch an Islamic bond - called a sukuk - worth £200m as early as next year. It will be structured to bring in a fixed return from a tangible asset or service, and without charging interest, in accordance with Islamic financial principles.
|
def root(self):
    """Return the root WebElement for this page object, or None.

    If an iframe is configured, the driver first switches into it.
    NOTE(review): assumes Page.get_driver() returns a live Selenium
    WebDriver -- confirm against the Page implementation.
    """
    if self.iframe:
        try:
            # Reset to the top document before entering the iframe.
            Page.get_driver().switch_to.default_content()
            Page.get_driver().switch_to.frame(self.iframe)
        except NoSuchFrameException:
            log.error(
                "unable to switch to iframe %s" % self.iframe
            )
    try:
        if not self.__root_val:
            # Wait until the element can be found before the lookup below.
            WebDriverWait(Page.get_driver(), self.timeout).until(
                lambda _: Page.get_driver().find_element(
                    self.locator_type,
                    self.query_string
                )
            )
        # Prefer a cached element; otherwise look it up fresh.
        return self.__root_val or Page.get_driver().find_element(
            self.locator_type, self.query_string
        )
    except (
        NoSuchElementException, TimeoutException,
        StaleElementReferenceException
    ):
        # Treat "not found / gone stale / timed out" uniformly as "no root".
        return None
|
// NewEnvironment initializes the test Environment, capturing the current
// process environment.
func NewEnvironment(t *testing.T, wd string, run RunFunc) *Environment {
	env := &Environment{
		t:   t,
		wd:  wd,
		run: run,
		env: os.Environ(),
	}
	return env
}
|
def create_ransomware_incident(alert) -> Dict[str, Any]:
    """Build an XSOAR ransomware-incident dict from a Helios alert."""
    props = _get_property_dict(alert['propertyList'])
    doc = alert['alertDocument']
    occurred_at = get_date_time_from_usecs(
        alert.get("latestTimestampUsecs", 0)).strftime(DATE_FORMAT)
    return {
        "name": doc['alertName'],
        "type": "Cohesity-Helios-Ransomware-Incident",
        "event_id": alert.get("id"),
        "occurred": occurred_at,
        "CustomFields": {
            "alert_description": doc['alertDescription'],
            "alert_cause": doc['alertCause'],
            "anomalous_object": props.get('object'),
            "environment": props.get('environment'),
            "anomaly_strength": props.get('anomalyStrength'),
        },
        "rawJSON": json.dumps(alert),
        "severity": convert_to_demisto_severity_int(alert.get('severity')),
    }
|
/**
 * Writes a data source Item to a design. Hierarchical select components
 * should override this method to recursively write any child items as well.
 *
 * @param design
 *            the element into which to insert the item
 * @param item
 *            the item to write
 * @param context
 *            the DesignContext instance used in writing
 * @return a JSOUP element representing the {@code item}
 */
protected Element writeItem(Element design, T item, DesignContext context) {
    Element element = design.appendElement("option");

    // Fall back to the item's string representation when no caption is
    // generated; this collapses the duplicated html(encodeForTextNode(...))
    // branches of the original.
    String caption = getItemCaptionGenerator().apply(item);
    element.html(DesignFormatter
            .encodeForTextNode(caption != null ? caption : item.toString()));

    element.attr("item", serializeDeclarativeRepresentation(item));

    Resource icon = getItemIconGenerator().apply(item);
    if (icon != null) {
        DesignAttributeHandler.writeAttribute("icon", element.attributes(),
                icon, null, Resource.class, context);
    }
    return element;
}
|
<filename>demo/run_inference.py
import argparse
import json
import os
import statistics
import time
import cv2
import numpy as np
from utils import SimpleObjectSegmentation, FashionClassifier, extract_roi
# Command-line options for the realtime webcam inference demo.
parser = argparse.ArgumentParser()
# Which trained architecture to load.
parser.add_argument("--model",type=str,required=True,
    choices=["cnn", "resnet18"],help="Name of the model to train.",
)
parser.add_argument("--weights_path",type=str,required=True,
    help="Path of the weights to load for the model.",
)
# OpenCV capture-device index (0 is usually the built-in webcam).
# NOTE(review): the default is the string "0" with type=int; argparse applies
# the type to string defaults, but an int literal would be clearer — confirm.
parser.add_argument("--device",type=int,default="0",
    help="ID of the device to use for the realtime video capture.",
)
parser.add_argument("--display_input",
    help="Display the input of the model in the top left corner.",
    action="store_true",
)
parser.add_argument("--display_fps",
    help="Display the FPS in the top right corner.",
    action="store_true",
)
parser.add_argument("--path_classes",
    default=os.path.join("..", "models", "classes.json"),type=str,
    help="Path to the json containing the classes of the Fashion MNIST dataset.",
)
args = parser.parse_args()
if __name__ == "__main__":
    # Print instructions
    print(f"------------- FASHION CLASSIFIER -------------")
    print(f" -> Press 'q' to quit")
    print(
        f" -> Press 'n' to reinitialize the background image for the simple object segmentation algorithm."
    )
    # Import the classes
    with open(args.path_classes) as json_file:
        classes = json.load(json_file)
    # Color use to draw and write (BGR green)
    color = (0, 255, 0)
    # list of the inference time
    list_inference_time = []
    # create the webcam device
    webcam = cv2.VideoCapture(args.device)
    # Get the first frame as reference for the object segmentation algorithm
    # NOTE(review): first_frame is only bound when the webcam opened
    # successfully; the constructor below raises NameError otherwise — confirm
    # intended behaviour.
    if webcam.isOpened():
        _, first_frame = webcam.read()
    # Instanciate the class responsible for the object detection and segmentation
    detector = SimpleObjectSegmentation(first_frame)
    # Instanciate the class responsible for the classifying the fashion objects
    fashion_classifier = FashionClassifier(
        model_name=args.model, weights_path=args.weights_path, classes=classes
    )
    # Initialize the flag to update the reference frame of the detector
    update_detector_reference = False
    # Start the capturing and processing loop
    while True:
        # Start the timer for the fps computation
        start_time = time.time()
        # Capture frame-by-frame
        _, frame = webcam.read()
        # Update the reference frame is the flag is set
        if update_detector_reference:
            detector.update_reference(frame)
            update_detector_reference = False
            print("Updated the reference image of the detector.")
        # Use the detector to grab the mask and the bounding-box of the biggest object in the frame
        mask, bbox = detector.detect_object(frame)
        # If an object was found
        if bbox is not None:
            # --- EXTRACTION ---
            # Extract the roi around the object and remove its background using the mask
            roi = extract_roi(frame, mask, bbox)
            # --- CLASSIFICATION ---
            (
                input_image,
                confidence,
                prediction,
                processing_time,
            ) = fashion_classifier.inference(roi)
            # Add the inference time to the list of inference time
            list_inference_time.append(processing_time)
            # --- DISPLAY ---
            x, y, w, h = bbox
            # Draw the bounding box around the ROI
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            # Write the confidence and predicted class on top of the bounding box
            cv2.putText(
                frame,
                f"{prediction[0]} {confidence[0]:.2f}",
                (x - 10, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                color,
                2,
            )
            # Display the input image in the top left corner
            if args.display_input:
                # Resize the input image
                scale = 3
                (input_height, input_width) = input_image.shape[:2]
                input_image_to_display = cv2.resize(
                    input_image, (input_width * scale, input_height * scale)
                )
                # Convert the input image to 3 channels
                input_image_rgb = cv2.cvtColor(
                    input_image_to_display, cv2.COLOR_GRAY2BGR
                )
                # Display the input image on top of the current frame
                frame[
                    : input_height * scale, : input_width * scale, :
                ] = input_image_rgb
        # Display the FPS counter
        if args.display_fps:
            fps = 1 / (time.time() - start_time)
            cv2.putText(
                frame,
                f"{fps:05.2f} FPS",
                (frame.shape[1] - 90, 15),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                color,
                2,
            )
        # Show the current frame with the other information
        cv2.imshow("Fashion Classifier", frame)
        # Wait for the different keys
        c = cv2.waitKey(1)
        if c == ord("q"):
            print(
                f"Average inference time for the {args.model} model over {len(list_inference_time)} inferences is {statistics.mean(list_inference_time)*1000:.3f}ms."
            )
            break
        elif c == ord("n"):
            update_detector_reference = True
    # When everything done, release the webcam and destroy the window
    webcam.release()
    cv2.destroyAllWindows()
|
package ai.verta.modeldb.common.config;
import ai.verta.modeldb.common.CommonMessages;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
/**
 * Configuration for storing artifacts on an NFS share.
 *
 * <p>Populated from configuration via Jackson. Fields are read-only except
 * {@link #setNfsRootPath(String)}, which is exposed explicitly despite the
 * class-level {@code @Setter(AccessLevel.NONE)}.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@Getter
@Setter(AccessLevel.NONE)
public class NFSConfig {
  // Protocol used when building artifact URLs.
  @JsonProperty private String nfsUrlProtocol = "https";
  // Root directory of the NFS share where artifacts are written.
  @JsonProperty private String nfsRootPath;
  @JsonProperty private String nfsServerHost = "";
  @JsonProperty private NFSEndpointConfig artifactEndpoint;
  @JsonProperty private String nfsPathPrefix;

  /**
   * Validates that all required fields are present, recursing into the
   * artifact endpoint.
   *
   * @param base configuration-path prefix used in error messages
   * @throws InvalidConfigException if nfsRootPath or artifactEndpoint is missing
   */
  public void validate(String base) throws InvalidConfigException {
    if (nfsRootPath == null || nfsRootPath.isEmpty())
      throw new InvalidConfigException(base + ".nfsRootPath", CommonMessages.MISSING_REQUIRED);
    if (artifactEndpoint == null)
      throw new InvalidConfigException(base + ".artifactEndpoint", CommonMessages.MISSING_REQUIRED);
    artifactEndpoint.validate(base + ".artifactEndpoint");
  }

  /** Returns the {@code nfs://<root>/} prefix used to tag stored artifact paths. */
  public String storeTypePathPrefix() {
    return String.format("nfs://%s/", nfsRootPath);
  }

  public void setNfsRootPath(String nfsRootPath) {
    this.nfsRootPath = nfsRootPath;
  }
}
|
import {action} from "janet-ts";
import {
Annotation,
BodyAnnotation,
FieldAnnotation,
NamedAnnotation,
PathParamAnnotation,
QueryAnnotation,
RequestHeaderAnnotation,
ResponseHeaderAnnotation,
} from "./HTTPAnnotation";
export * from "./HTTPAnnotation";
/** HTTP verbs supported by the annotation layer. */
export type Method = "GET" | "POST" | "PUT" | "DELETE" | "HEAD";

/** Reflect-metadata key under which request info is stored on action classes. */
export const metaKey = "META::HTTPAction";

/** Parsed, static description of an HTTP request extracted from annotations. */
export interface RequestScheme {
  readonly url: string;
  readonly method: Method;
  readonly formFields: ReadonlyArray<FieldAnnotation>;
  readonly queryParams: ReadonlyArray<QueryAnnotation>;
  readonly pathParams: ReadonlyArray<PathParamAnnotation>;
  readonly requestHeaders: ReadonlyArray<RequestHeaderAnnotation>;
  readonly responseHeaders: ReadonlyArray<ResponseHeaderAnnotation>;
  readonly body: BodyAnnotation | null;
}
/** Raw metadata stored on an action class by the @HttpAction decorator. */
interface HTTPRequestInfo {
  readonly url: string;
  readonly method: Method;
  // Map from annotation class name to the annotations of that kind.
  readonly annotations: any;
}

/** Constructor type used to look up annotations by their class name. */
interface AnnotationConstructor<T> {
  new (...mutableParams: any[]): T;
  readonly name?: string;
}
// Returns the decorator metadata attached to the action's class, or a
// nullish value when the class was never decorated with @HttpAction.
function extractRequestInfo(httpAction: any): HTTPRequestInfo | null {
  return Reflect.getMetadata(metaKey, httpAction.constructor);
}
// Returns every annotation of the given kind, or an empty list when the
// action declares none.
function extractFields<T extends Annotation>(requestInfo: HTTPRequestInfo,
                                             annotationType: AnnotationConstructor<T>): ReadonlyArray<T> {
  return requestInfo.annotations[annotationType.name!] || [];
}
// Returns the first annotation of the given kind, or null when absent.
function extractField<T extends Annotation>(requestInfo: HTTPRequestInfo,
                                            annotationType: AnnotationConstructor<T>): T | null {
  return extractFields(requestInfo, annotationType)[0] || null;
}
/** Reports whether the given action was decorated with @HttpAction. */
export function isHTTPAction(httpAction: any): boolean {
  return extractRequestInfo(httpAction) != null;
}
/**
 * Parses the annotation metadata of an action into a RequestScheme.
 * Throws when the action was not decorated with @HttpAction.
 */
export function parseHTTPAction(httpAction: any): RequestScheme {
  const requestInfo = extractRequestInfo(httpAction);
  if (requestInfo == null) {
    throw new Error("Invalid HTTP Action:" + httpAction);
  }
  return {
    url: requestInfo.url,
    method: requestInfo.method,
    formFields: extractFields(requestInfo, FieldAnnotation),
    queryParams: extractFields(requestInfo, QueryAnnotation),
    pathParams: extractFields(requestInfo, PathParamAnnotation),
    requestHeaders: extractFields(requestInfo, RequestHeaderAnnotation),
    responseHeaders: extractFields(requestInfo, ResponseHeaderAnnotation),
    body: extractField(requestInfo, BodyAnnotation),
  };
}
/** A resolved name/value pair read from an annotated property. */
export interface NamedValue {
  readonly name: string;
  readonly value: any;
}

/** A fully materialised HTTP request, ready to be executed. */
export interface HTTPRequest {
  readonly url: string;
  readonly method: Method;
  readonly queryParams: ReadonlyArray<NamedValue>;
  readonly headers: any;
  readonly body: any;
}
// Substitutes every "{name}" placeholder in the url with its value.
// String#replace with a string pattern replaces only the FIRST occurrence,
// so the original left repeated placeholders unfilled; split/join covers all.
function applyPathParams(url: string, params: ReadonlyArray<NamedValue>): string {
  let updatedURL = url;
  params.forEach((pathParam: NamedValue) => {
    updatedURL = updatedURL.split("{" + pathParam.name + "}").join(pathParam.value);
  });
  return updatedURL;
}
// Appends the query string to the url. Names and values are percent-encoded
// so reserved characters (spaces, '&', '=') cannot corrupt the query string;
// the original concatenated raw values.
function applyQueryParams(url: string, params: ReadonlyArray<NamedValue>): string {
  if (params.length === 0) {
    return url;
  }
  const queryPairs = params.map((param: NamedValue) => {
    return encodeURIComponent(param.name) + "=" + encodeURIComponent(param.value);
  });
  return url + "?" + queryPairs.join("&");
}
// Folds a list of name/value pairs into a plain object keyed by name.
function toHash(values: ReadonlyArray<NamedValue>): any {
  return values.reduce((acc: any, v: NamedValue) => {
    acc[v.name] = v.value;
    return acc;
  }, {});
}
/**
 * Materialises a RequestScheme into a concrete HTTPRequest by reading the
 * annotated property values from the given action instance.
 */
export function createHTTPRequestFromScheme(httpAction: any, scheme: RequestScheme): HTTPRequest {
  // Resolve one annotation to its runtime name/value pair.
  const toParams = (annotation: NamedAnnotation): NamedValue => {
    return {
      name: annotation.name,
      value: annotation.propertyAccessor.readValue(httpAction),
    };
  };
  const pathParams: NamedValue[] = scheme.pathParams.map(toParams);
  const queryParams: NamedValue[] = scheme.queryParams.map(toParams);
  const headers: any = toHash(scheme.requestHeaders.map(toParams));
  const formFields: NamedValue[] = scheme.formFields.map(toParams);
  // An explicit @Body wins; otherwise form fields are folded into an object body.
  let body = scheme.body != null ? scheme.body.propertyAccessor.readValue(httpAction) : null;
  if (!body && formFields.length > 0) {
    body = {};
    formFields.forEach((param: NamedValue) => {
      body[param.name] = param.value;
    });
  }
  let url = applyPathParams(scheme.url, pathParams);
  url = applyQueryParams(url, queryParams);
  return {
    url: url,
    method: scheme.method,
    queryParams: queryParams,
    headers: headers,
    body: body,
  };
}
/** Parses the action's annotations and materialises the HTTP request. */
export function createHTTPRequestFromAction(httpAction: any): HTTPRequest {
  return createHTTPRequestFromScheme(httpAction, parseHTTPAction(httpAction));
}
/**
 * Class decorator that records the url/method pair as Reflect metadata on the
 * action class and then registers it as a janet-ts action.
 */
export function HttpAction(url: string, method: Method = "GET"): ClassDecorator {
  return (target: any) => {
    // Property decorators may already have stored annotations; keep them.
    let metadata = Reflect.getMetadata(metaKey, target);
    if (!metadata) {
      metadata = {
        annotations: {},
      };
    }
    metadata.url = url;
    metadata.method = method;
    Reflect.defineMetadata(metaKey, metadata, target);
    return action(target);
  };
}
|
def label_from_attrs(da, extra=""):
    """Build an axis label for ``da`` from its metadata.

    Prefers ``long_name``, then ``standard_name``, then the variable's own
    name; a units suffix is appended when available. The result is wrapped
    across lines for display.
    """
    if da.attrs.get("long_name"):
        name = da.attrs["long_name"]
    elif da.attrs.get("standard_name"):
        name = da.attrs["standard_name"]
    elif da.name is not None:
        name = da.name
    else:
        name = ""

    def _get_units_from_attrs(da):
        # "units" is the conventional key; "unit" is tolerated as a fallback.
        if da.attrs.get("units"):
            units = " [{}]".format(da.attrs["units"])
        elif da.attrs.get("unit"):
            units = " [{}]".format(da.attrs["unit"])
        else:
            units = ""
        return units

    # Pint arrays carry units on the data itself, which takes precedence
    # over any units recorded in attrs.
    pint_array_type = DuckArrayModule("pint").type
    if isinstance(da.data, pint_array_type):
        units = " [{}]".format(str(da.data.units))
    else:
        units = _get_units_from_attrs(da)

    # LaTeX-wrapped names ($...$) are wrapped so each line stays in math mode.
    if name.startswith("$") and (name.count("$") % 2 == 0):
        return "$\n$".join(
            textwrap.wrap(name + extra + units, 60, break_long_words=False)
        )
    else:
        return "\n".join(textwrap.wrap(name + extra + units, 30))
|
module Machine.Semantics where
import Prelude hiding (Read, Monad, div, mod, abs)
import qualified Prelude (Read, Monad, div, mod, abs)
import Data.Functor (void)
import Data.Either (partitionEithers)
import Control.Selective
import Control.Arrow (second)
import Machine.Decode (decode)
import qualified Data.Set as Set
import Data.String (fromString)
import Data.Maybe (fromJust)
import Algebra.Graph hiding (graph)
import Algebra.Graph.Export.Dot
import Machine.Types
--------------------------------------------------------------------------------
---------------- Instruction Semantics -----------------------------------------
--------------------------------------------------------------------------------
-- | Typed keys of the machine state: registers, memory cells, flags and the
--   special-purpose registers.
data Key a where
    Reg :: Register -> Key (Value)
    -- ^ register
    Addr :: MemoryAddress -> Key (Value)
    -- ^ memory address
    F :: Flag -> Key (Bool)
    -- ^ flag
    IC :: Key (Value)
    -- ^ instruction counter
    IR :: Key (InstructionCode)
    -- ^ instruction register
    Prog :: InstructionAddress -> Key (InstructionCode)
    -- ^ program memory address

instance Show (Key a) where
    show = \case
        Reg reg -> show reg
        Addr addr -> show addr
        F flag -> show flag
        IC -> "IC"
        IR -> "IR"
        Prog addr -> show addr

-- | Read a key of the machine state into the metalanguage context @f@.
type Read f = forall a. Key a -> f (Sym a)

-- | Write a computed value into a key of the machine state.
type Write f = forall a. Key a -> f (Sym a) -> f (Sym a)

-- | The type of instruction semantics: a computation polymorphic in the
--   metalanguage constraint @c@ (Functor, Applicative, Selective, ...).
type FS c a = forall f. c f => Read f ->
                               Write f ->
                               f a
--------------------------------------------------------------------------------
--------------- Data-flow graphs of programs -----------------------------------
--------------------------------------------------------------------------------
type KeyLabel = String
type InstructionLabel = String

-- | Extract input and output data-dependencies of a computation
dependencies :: FS Selective a
             -> ([KeyLabel], [KeyLabel])
dependencies task =
    partitionEithers . getOver $
    task trackingRead trackingWrite
  where trackingRead k = Over [Left $ show k]
        trackingWrite k fv = fv *> Over [Right $ show k]

-- | Compute static data flow graph of an instruction. In case of supplying a
--   monadic, i.e. data-dependent instruction, 'Nothing' is returned.
--
--   Since no data requiring simulation is performed, the semantics metalanguage
--   terms are mocked: 'read' becomes 'const 0' and 'write' is simply ignored.
instructionGraph :: (InstructionAddress, Instruction)
                 -> Maybe (Graph (Either KeyLabel InstructionLabel))
instructionGraph i@(addr, instr) = do
    let (ins, outs) = dependencies (instructionSemantics instr)
    let instrInfo = instructionLabel
    pure $ overlay (star (Right instrInfo) (map Left outs))
                   (transpose $ star (Right instrInfo) (map Left ins))
    where instructionLabel = (show addr <> "|" <> show instr)

-- | Serialise data flow graph as a .dot string
drawGraph :: Graph (Either KeyLabel InstructionLabel) -> String
drawGraph g = export style g
  where
    style = defaultStyleViaShow
        { vertexName = \v -> "v" ++ show (fromJust $ Set.lookupIndex v names)
        , vertexAttributes = \x -> case x of
              Left k  -> [ "shape" := "circle"
                         , "label" := k ]
              Right i -> [ "shape" := "record"
                         , "label" := i ] }
    names = vertexSet g
    -- instructionLabel a i = fromString (show a <> "|" <> show i)

-- | Compute static data flow graph of a program. In case of supplying a
--   monadic, i.e. data-dependent instruction, 'Nothing' is returned.
programDataGraph :: Program
                 -> Maybe (Graph (Either KeyLabel InstructionLabel))
programDataGraph p =
    let p' = map (second decode) p
    in foldl go (Just empty) (map instructionGraph p')
    where go _ Nothing = Nothing
          go acc g = overlay <$> acc <*> g
--------------------------------------------------------------------------------
--------------- Semantics of instructions --------------------------------------
--------------------------------------------------------------------------------
-- | Halt the execution.
--   Applicative.
halt :: FS Applicative ()
halt read write = void $
    write (F Halted) (pure (SConst True))

-- | Write the immediate value simm into register reg.
--   Applicative.
set :: Register -> SImm8 -> FS Applicative ()
set reg simm read write = void $
    write (Reg reg) (pure (SConst . fromIntegral $ simm))

-- | Add a value from memory location to one in a register.
--   Applicative.
--
--   It looks like we after all need a Monad here since Applicative theoretically permits reordering
--   of effects. Here, it is important for the overflow check to happen before the addition, otherwise
--   the check may be executed with the updated value of the register and will give an invalid result.
add :: Register -> MemoryAddress
    -> FS Applicative ()
add reg addr = \read write -> void $
    let arg1 = read (Reg reg)
        arg2 = read (Addr addr)
        result = SAdd <$> arg1
                      <*> arg2
        overflow = willOverflow <$> arg1 <*> arg2
    in write (F Overflow) overflow *>
       write (F Zero) ((SEq (SConst 0)) <$> write (Reg reg) result)
    where willOverflow :: Sym Value -> Sym Value -> Sym Bool
          willOverflow x y =
              let o1 = SGt y (SConst 0)
                  o2 = SGt x (SSub maxBound y)
                  o3 = SLt y (SConst 0)
                  o4 = SLt x (SSub minBound y)
              in SOr (SAnd o1 o2)
                     (SAnd o3 o4)

-- | Load a value from a memory location to a register.
--   Functor.
load :: Register -> MemoryAddress
     -> FS Functor ()
load reg addr read write = void $
    write (Reg reg) (read (Addr addr))

-- | Store a value from a register to a memory location.
--   Functor.
store :: Register -> MemoryAddress -> FS Functor ()
store reg addr read write = void $
    write (Addr addr) (read (Reg reg))
-- let overflow = arg2 .> 0 &&& arg1 .< (minBound @Value + arg2) |||
--                arg2 .< 0 &&& arg1 .> (maxBound @Value + arg2)

-- | Subtract a value at a memory location from one in a register.
--   Applicative.
sub :: Register -> MemoryAddress -> FS Applicative ()
sub reg addr = \read write -> void $
    let arg1 = read (Reg reg)
        arg2 = read (Addr addr)
        result = SSub <$> arg1
                      <*> arg2
        overflow = willOverflow <$> arg1 <*> arg2
    in write (F Overflow) overflow *>
       write (F Zero) ((SEq (SConst 0)) <$> write (Reg reg) result)
    where willOverflow :: Sym Value -> Sym Value -> Sym Bool
          willOverflow x y =
              let o1 = SGt y (SConst 0)
                  o2 = SLt x (SAdd minBound y)
                  o3 = SLt y (SConst 0)
                  o4 = SGt x (SAdd maxBound y)
              in SOr (SAnd o1 o2)
                     (SAnd o3 o4)

-- | Multiply a value in a register by one from a memory location.
--   Applicative.
mul :: Register -> MemoryAddress -> FS Applicative ()
mul reg addr = \read write -> void $
    let result = (SMul) <$> read (Reg reg) <*> read (Addr addr)
    in write (F Zero) ((SEq (SConst 0)) <$> write (Reg reg) result)
    -- in write (F Zero) (write (Reg reg) result)

-- | Divide a value in a register by one from a memory location.
--   Applicative.
div :: Register -> MemoryAddress -> FS Applicative ()
div reg addr = \read write -> void $
    let result = SDiv <$> read (Reg reg) <*> read (Addr addr)
    in write (F Zero) ((SEq (SConst 0)) <$> write (Reg reg) result)
    -- in write (F Zero) (write (Reg reg) result)

-- | Remainder of dividing a register by a memory cell; Overflow covers
--   division by zero and the minBound `mod` (-1) case.
mod :: Register -> MemoryAddress -> FS Applicative ()
mod reg addr = \read write -> void $
    let arg1 = read (Reg reg)
        arg2 = read (Addr addr)
        result = SMod <$> arg1
                      <*> arg2
        overflow = willOverflow <$> arg1 <*> arg2
    in write (F Overflow) overflow *>
       write (F Zero) ((SEq (SConst 0)) <$> write (Reg reg) result)
    where willOverflow :: Sym Value -> Sym Value -> Sym Bool
          willOverflow x y =
              let o1 = SEq y (SConst 0)
                  o2 = SEq x minBound
                  o3 = SEq y (SConst (-1))
              in SOr o1
                     (SAnd o2 o3)

-- | Absolute value of a register; Overflow records a negative argument
--   (abs minBound overflows in two's complement).
abs :: Register -> FS Applicative ()
abs reg = \read write -> void $
    let result = SAbs <$> read (Reg reg)
        overflow x = SLt <$> x <*> pure (SConst 0)
    in write (F Overflow) (overflow $ write (Reg reg) result)
-- | Unconditional jump.
--   Functor.
jump :: SImm8 -> FS Functor ()
jump simm read write = void $
    write IC (fmap (SAdd (SConst . fromIntegral $ simm)) (read IC))

-- | Note that this polymorphic semantics of conditional jumps only works
--   for simulation and dependency analysis. For symbolic execution it has
--   to be implemented separately. See 'Machine.Symbolic.jumpZeroSym'.
jumpZero :: SImm8 -> FS Selective ()
jumpZero _ _ _ = error "jumpZero not implemented"

-- | Note that this polymorphic semantics of conditional jumps only works
--   for simulation and dependency analysis. For symbolic execution it has
--   to be implemented separately. See 'Machine.Symbolic.jumpCtSym'.
jumpCt :: SImm8 -> FS Selective ()
jumpCt offset read write =
    whenS (unliteral <$> read (F Condition))
          (void $ write IC (SAdd <$> pure (SConst (fromIntegral offset))
                                 <*> read IC))

-- | Note that this polymorphic semantics of conditional jumps only works
--   for simulation and dependency analysis. For symbolic execution it has
--   to be implemented separately. See 'Machine.Symbolic.jumpCfSym'.
jumpCf :: SImm8 -> FS Selective ()
jumpCf offset read write =
    whenS (not . unliteral <$> read (F Condition))
          (void $ write IC (SAdd <$> pure (SConst (fromIntegral offset))
                                 <*> read IC))

-- | Set the Condition flag when the register equals the memory cell.
cmpEq :: Register -> MemoryAddress -> FS Applicative ()
cmpEq reg addr = \read write -> void $
    write (F Condition) (SEq <$> read (Reg reg) <*> read (Addr addr))

-- | Set the Condition flag when the register is greater than the memory cell.
cmpGt :: Register -> MemoryAddress -> FS Applicative ()
cmpGt reg addr = \read write -> void $
    write (F Condition) (SGt <$> read (Reg reg) <*> read (Addr addr))

-- | Set the Condition flag when the register is less than the memory cell.
cmpLt :: Register -> MemoryAddress -> FS Applicative ()
cmpLt reg addr = \read write -> void $
    write (F Condition) (SLt <$> read (Reg reg) <*> read (Addr addr))
-- | Dispatch an instruction to its semantics in the 'Selective' metalanguage.
instructionSemantics :: Instruction -> FS Selective ()
instructionSemantics (Instruction i) r w = case i of
    Halt -> halt r w
    Load reg addr -> load reg addr r w
    -- -- LoadMI reg addr -> loadMI reg addr r w
    Set reg simm8 -> set reg simm8 r w
    Store reg addr -> store reg addr r w
    Add reg addr -> add reg addr r w
    Sub reg addr -> sub reg addr r w
    Mul reg addr -> mul reg addr r w
    Div reg addr -> div reg addr r w
    Mod reg addr -> mod reg addr r w
    Abs reg -> abs reg r w
    Jump simm8 -> jump simm8 r w
    JumpZero simm8 -> jumpZero simm8 r w
    CmpEq reg addr -> cmpEq reg addr r w
    CmpGt reg addr -> cmpGt reg addr r w
    CmpLt reg addr -> cmpLt reg addr r w
    JumpCt simm8 -> jumpCt simm8 r w
    JumpCf simm8 -> jumpCf simm8 r w
|
/**
 * Filters out all late commits of submissions from the checked out repository of a participation
 *
 * @param submissionDate The submission date (inclusive), after which all submissions should get filtered out
 * @param participation The participation related to the repository
 * @param repo The repository for which to filter all late submissions
 */
private void filterLateSubmissions(ZonedDateTime submissionDate, ProgrammingExerciseStudentParticipation participation, Repository repo) {
    // Pass the object itself so the logging framework renders it lazily,
    // instead of eagerly calling toString() even when debug is disabled.
    log.debug("Filter late submissions for participation {}", participation);
    // Latest submission that happened strictly before the cut-off date.
    Optional<Submission> lastValidSubmission = participation.getSubmissions().stream()
            .filter(s -> s.getSubmissionDate() != null && s.getSubmissionDate().isBefore(submissionDate)).max(Comparator.comparing(Submission::getSubmissionDate));
    gitService.filterLateSubmissions(repo, lastValidSubmission, submissionDate);
}
|
<reponame>dubiety/azure-sdk-for-python
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from .sweep import Sweep
from .command import Command
from .parallel import Parallel
from .base_node import parse_inputs_outputs, BaseNode
# Names re-exported as the public surface of this subpackage.
__all__ = ["BaseNode", "Sweep", "Command", "Parallel", "parse_inputs_outputs"]
|
#![allow(non_snake_case)]
#![allow(unused_variables)]
#![allow(dead_code)]

/// Counts the "Shichi-Go-San" numbers that are `<= n_max`: positive integers
/// written only with the digits 7, 5 and 3, using each of the three digits at
/// least once.
///
/// `x` is the number built so far, `depth` its digit count, and `max_len` the
/// digit count of `n_max` (no candidate needs more digits than that). The
/// length is passed in rather than recomputed from `n_max` on every call, as
/// it is invariant across the whole recursion.
fn count_753(x: i64, depth: usize, max_len: usize, n_max: i64) -> i64 {
    let s = x.to_string();
    // Count x itself when it is in range and uses all three required digits.
    let hit = if x <= n_max && s.contains('7') && s.contains('5') && s.contains('3') {
        1
    } else {
        0
    };
    if depth < max_len {
        // Extend the number with each of the three allowed digits.
        hit + count_753(10 * x + 7, depth + 1, max_len, n_max)
            + count_753(10 * x + 5, depth + 1, max_len, n_max)
            + count_753(10 * x + 3, depth + 1, max_len, n_max)
    } else {
        hit
    }
}

fn main() {
    // Read the upper bound N from standard input.
    let n: i64 = {
        let mut line: String = String::new();
        std::io::stdin().read_line(&mut line).unwrap();
        line.trim().parse().unwrap()
    };
    let max_len = n.to_string().len();
    println!("{}", count_753(0, 0, max_len, n));
}
|
<filename>src/app/estadisticas/estadisticas.module.spec.ts
import { EstadisticasModule } from './estadisticas.module';
// Smoke test: verifies the Angular module class can be instantiated.
describe('EstadisticasModule', () => {
  let estadisticasModule: EstadisticasModule;
  // Fresh module instance before every spec.
  beforeEach(() => {
    estadisticasModule = new EstadisticasModule();
  });
  it('should create an instance', () => {
    expect(estadisticasModule).toBeTruthy();
  });
});
|
rsync Usage rsync [OPTION]... SRC [SRC]... DEST rsync [OPTION]... SRC [SRC]... [USER@]HOST:DEST rsync [OPTION]... SRC [SRC]... [USER@]HOST::DEST rsync [OPTION]... SRC [SRC]... rsync://[USER@]HOST[:PORT]/DEST rsync [OPTION]... [USER@]HOST:SRC [DEST] rsync [OPTION]... [USER@]HOST::SRC [DEST] rsync [OPTION]... rsync://[USER@]HOST[:PORT]/SRC [DEST] The ':' usages connect via remote shell, while '::' & 'rsync://' usages connect to an rsync daemon, and require SRC or DEST to start with a module name. rsync Options --verbose, -v increase verbosity --quiet, -q suppress non-error messages --no-motd suppress daemon-mode MOTD (see manpage caveat) --checksum, -c skip based on checksum, not mod-time & size --archive, -a archive mode; equals -rlptgoD (no -H,-A,-X) --no-OPTION turn off an implied OPTION (e.g. --no-D) --recursive, -r recurse into directories --relative, -R use relative path names --no-implied-dirs don't send implied dirs with --relative --backup, -b make backups (see --suffix & --backup-dir) --backup-dir=DIR make backups into hierarchy based in DIR --suffix=SUFFIX set backup suffix (default ~ w/o --backup-dir) --update, -u skip files that are newer on the receiver --inplace update destination files in-place (SEE MAN PAGE --append append data onto shorter files --append-verify like --append, but with old data in file checksum --dirs, -d transfer directories without recursing --links, -l copy symlinks as symlinks --copy-links, -L transform symlink into referent file/dir --copy-unsafe-links only "unsafe" symlinks are transformed --safe-links ignore symlinks that point outside the source tree --copy-dirlinks, -k transform symlink to a dir into referent dir --keep-dirlinks, -K treat symlinked dir on receiver as dir --hard-links, -H preserve hard links --perms, -p preserve permissions --executability, -E preserve the file's executability --chmod=CHMOD affect file and/or directory permissions --acls, -A preserve ACLs (implies --perms) --xattrs, -X preserve extended 
attributes --owner, -o preserve owner (super-user only) --group, -g preserve group --devices preserve device files (super-user only) --specials preserve special files -D same as --devices --specials --times, -t preserve modification times --omit-dir-times, -O omit directories from --times --super receiver attempts super-user activities --fake-super store/recover privileged attrs using xattrs
rsync Options contd. --sparse, -S handle sparse files efficiently --dry-run, -n perform a trial run with no changes made --whole-file, -W copy files whole (without delta-xfer algorithm) --one-file-system, -x don't cross filesystem boundaries --block-size=SIZE force a fixed checksum block-size --rsh=COMMAND, -e specify the remote shell to use --rsync-path=PROGRAM specify the rsync to run on the remote machine --existing skip creating new files on receiver --ignore-existing skip updating files that already exist on receiver --remove-source-files sender removes synchronized files (non-dirs) --del an alias for --delete-during --delete delete extraneous files from destination dirs --delete-before receiver deletes before transfer, not during --delete-during receiver deletes during transfer (default) --delete-delay find deletions during, delete after --delete-after receiver deletes after transfer, not during --delete-excluded also delete excluded files from destination dirs --ignore-errors delete even if there are I/O errors --force force deletion of directories even if not empty --max-delete=NUM don't delete more than NUM files --max-size=SIZE don't transfer any file larger than SIZE --min-size=SIZE don't transfer any file smaller than SIZE --partial keep partially transferred files --partial-dir=DIR put a partially transferred file into DIR --delay-updates put all updated files into place at transfer's end --prune-empty-dirs, -m prune empty directory chains from the file-list --numeric-ids don't map uid/gid values by user/group name --timeout=SECONDS set I/O timeout in seconds --contimeout set daemon connection timeout in seconds --ignore-times, -I don't skip files that match in size and mod-time --size-only skip files that match in size --modify-window=NUM compare mod-times with reduced accuracy --temp-dir, -T create temporary files in directory DIR --fuzzy, -y find similar file for basis if no dest file --compare-dest=DIR also compare destination files relative to DIR 
--copy-dest=DIR ... and include copies of unchanged files --link-dest=DIR hardlink to files in DIR when unchanged --compress, -z compress file data during the transfer
|
A sample of frozen Siberian soil has revealed that a giant virus, visible through a light microscope, retained its potency despite being frozen for thousands of years.
The virus, pithovirus sibericum, attacked single-cell organisms despite having not done any infecting for a few hundred centuries.
Jean-Michel Claverie, an evolutionary biologist at Aix-Marseille University, is worried because it means that there might be a life-threatening amount of viruses lurking within the Earth's frozen soil layers, waiting patiently to be unleashed by melting Arctic ice. “We might be able to eradicate viruses from the surface of the planet,” Claverie says, “but that doesn't mean that there isn't a single particle of that virus still alive somewhere.”
So as if we weren't worried about climate change enough, let's take a look at a few of the viruses that would put a complete downer on everything were they to return.
|
5 Ingredient Homemade KIND Nut Bars (V, GF, DF): an easy, one bowl recipe for irresistibly salty and sweet homemade KIND bars. Protein-Packed, Low Carb, Vegan, Gluten-Free, Dairy-Free, Refined Sugar-Free. Recipe video included! Just 106 calories per bar, with only 5 grams of sugar, 8 grams of carbs and 4 grams of protein each!
Happy almost Back to School season!! That’s totally a thing, don’t you think? Like, an entire season devoted to back to school? It’s probably pretty obvious that I’m a bit too excited about all the snack-making this means. As evidenced by the following: 4 ingredient no bake peanut butter coconut bites, cranberry almond breakfast bars, cherry chocolate chip chia bites (I know), and one of my all-time faves, oatmeal raisin cookies!
There’s just so much going on during these months. And we’ve got to stay fueled throughout it all. It’s kind of like the storm before the calm before the storm. The second storm being THE HOLIDAYS, of course.
But we definitely do not need to talk about the holidays until they arrive. Except we probably will because… who else loves the holidays? 🙂 Alright, I promise to slow things down a bit. Anyway, so back to back to school. This time of year, it seems like everyone’s moving, packing, unpacking, making snacks, picking classes, going for one last vacation, and otherwise engaging in all the activities.
Even though it’s been ages since I’ve been “back to school,” I still think of August/September as the back to school season. And I’m like, let’s do this!
I even go to Target and search for a new pencil case (then realize I don’t use pencils anymore, because: INTERNET.) But they have sparkly pink, purple and aqua pencil cases!! Who even needs to put pencils in them anyway? New makeup case, anyone?
I even go to the mall to shop for new dorm-ready comforter sets. Then realize I don’t need them because: ADULTING. But then I think my mom would be so charmed by this 7-piece comforter set that comes with not one, but two fuzzy pillows, and a pillow sham, and a backup pillow sham in case the first one gets eaten by the aforementioned fuzzy pillow cases. Or something.
I might even stroll to the unconventional/stylish/industrial-but-modern-and-somehow-charmingly-beachhouse-inspired seating they’ve got at the furniture store. Then I realize that we sooo don’t have the space in our being-a-for-realsies-adult-ridiculous-rent-in-a-big-city-apartment. And we so can’t pull off shabby-industrial-modern-beachhouse-chic if someone paid us to. But then I’m like, maybe we could… Erik, don’t fail me now!
I might even consider a gadget or 300 from the electronic cool kids store. I mean, I can obviously pull off those chunky earphones that match with none of my wardrobe because I’m not cool. At all. But maybe those chunky earphones will make me cool.
Ah… back-to-school. So many dreams. So many hopes, all in one shopping cart. Obviously achievable, right? Right?!?
But then I’m like, A-HA!!! You know what I’ve got in the bag? Erm, not my shopping bag, but My Bag of Life Tricks?
SNACKS!
Take that, back-to-school too cool for school (but not really because, uh, back-to-SCHOOL)!!
The one thing being past the age/era/no inclination to go back-to-school BTS fervor that can’t be taken away from me! And I shall purchase all the sparkly rainbow-colored pencil cases I please! Hmph!
Ahem… anyway, one of my fave snacks from back in the day were KIND bars. Does anyone know whether or not I’m supposed to be screaming KIND like that? Let’s just say I’ve been screaming INTERNET, ADULTING and those other words in an ode to KIND bars.
And now that I think about it, screaming the words INTERNET and ADULTING right next to each other totally was not a good idea. Moving on!
One thing I miss the MOST (there I go again) with store-bought snacks are KIND bars. Weren’t they the best? All the nuts and the good stuff (i.e. add-ins), without any of the filler. Btw, if you’re in the mood for no-filler, guilt-free snacks, you’ve got to make my Oil-Free Paleo Cinnamon Nut Granola!! It’s refined-sugar free, super easy to make, and soon-to-be your favorite healthy snack.
Besides these low carb homemade KIND bars, of course. 😉
So it’s been my plot, my plan, my machination (back to school makes me scheme for no good reason) to create homemade KIND bars like it was nobody’s business! Allow me to present to you my 5 Ingredient Homemade KIND Nut Bars.
I wanted to start with a really basic recipe, something super simple and made with the fewest ingredients possible. Then, upgrade at a later date (spoiler alert!!). I may or may not have a few saucy things planned for future homemade KIND bar recipes (second spoiler alert!!! But not really bc it’s the same one as before!!). 😉 UPDATE: the sauciness has arrived, in the form of homemade dark chocolate sea salt KIND nut bars!
These 5 Ingredient Homemade KIND Nut Bars are:
made with just 5 ingredients (in case you weren’t listening)
(in case you weren’t listening) just 106 calories per bar
… with only 5 grams of sugar , 8 grams of carbs
, 4 grams of protein each!
each! … and yes, could be made with just 3 ingredients if you use just 1 nut and skip the salt
if you use just 1 nut and skip the salt made in just one bowl
prepared in less than 5 minutes
vegan , gluten-free , dairy-free , refined sugar free
, , , crunchy, crispy, TASTY
the perfect combo of salty and sweet
and plant-based and allergy-friendly
and the perfect on-the-go-snack that’s low carb!
that’s low carb! or a delicious dessert
cool enough to get ya back in the back-to-school game!
Grab just 5 ingredients and just one bowl. These 5 Ingredient Homemade KIND Nut Bars will be ready before you know it! I was literally shocked by how quickly I could prep them. 🙂 Gold star!! Okay, I’ve got to tone down my back-to-school obsession! I’m just so excited to be in the kitchen with you, making tasty and healthy homemade treats. I can’t wait to hear what you think. In the meantime, tell me:
What are you most excited about for the back-to-school season?
Sending you all my love and maybe even a dove, xo Demeter ❤
☀︎Click below to Pin ☀︎
☀︎ Did you make this recipe? Take a pic and share it on Instagram with the hashtag #beamingbaker and tag @beamingbaker. I would love to see it! ☀
© beamingbaker.com. All content and images are protected by copyright. Please do not use my images or recipe without my permission. If you wish to republish this recipe, please re-write the entire recipe using your own words and include a link to this post for the recipe.
If you enjoyed this 5 Ingredient Homemade KIND Nut Bars recipe, then you’ll just love these plant-based, allergy-friendly recipes:
☀︎ Gluten Free Vegan Snack Recipes
Dark Chocolate Sea Salt KIND Nut Bars | Gluten Free No Bake Brownie Energy Bites | Gluten Free Vegan Banana Nut Muffins | 3 Ingredient No Bake Peanut Butter Granola Bars | Good Morning Breakfast Cookies | Almond Butter Chocolate Chip Walnut Cookies | No Bake Chewy Chocolate Chip Granola Bars | 4 Ingredient No Bake Peanut Butter Cookie Energy Bars ☀︎ All Vegan, Gluten-Free, Dairy-Free, Protein-Packed and Whole Grain.
Facebook ☀︎ Pinterest ☀︎ Twitter ☀︎ Instagram
|
<gh_stars>0
// Copyright 2019 PayPal Inc.
//
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lib
import (
"errors"
"fmt"
"math/rand"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/paypal/hera/cal"
"github.com/paypal/hera/utility/logger"
)
// bcklgEvtPrefix holds the CAL event-name prefixes for backlog entry events,
// indexed by worker type (write / read-only / standby).
var bcklgEvtPrefix = [wtypeTotalCount]string{
	"BKLG", "BKLG_R", "BKLG_S"}

// bcklgTimeoutEvtPrefix holds the lower-case CAL event-name prefixes for
// backlog timeout/eviction events, indexed by worker type.
var bcklgTimeoutEvtPrefix = [wtypeTotalCount]string{
	"bklg", "bklg_r", "bklg_s"}

// poolNamePrefix holds the human-readable pool names, indexed by worker type.
var poolNamePrefix = [wtypeTotalCount]string{
	"write", "readonly", "standby"}
// WorkerPool represents a pool of workers of the same kind.
// The implementation uses a C++-ish mutex/condition variable/queue rather than a Golang-ish channel + timer
// because the policy for using the worker is LIFO (for better usage of cache) while the channels are FIFO.
type WorkerPool struct {
	//
	// a locking condition used to wait/notify on empty/replenish pool
	// this lock and the two worker queues are supposed to be a private member fields
	//
	poolCond *sync.Cond
	activeQ Queue // a queue with active worker client ready to serve traffic
	ShardID int // the shard the workers are connected to
	Type HeraWorkerType // the worker type like write, read
	InstID int // instance id; used when composing state events and CAL event names
	// interval for checking on worker lifespan
	lifeSpanCheckInterval int
	currentSize int // the number of workers in the pool
	desiredSize int // the desired number of workers in the pool, usually equal to currentSize, different for a
	// brief period when the pool is dynamically resized
	moduleName string // basically the application name as it comes from the command line
	// the number of worker not in INIT state, atomically maintained
	numHealthyWorkers int32
	//
	// number of requests in the backlog. we could lock operation to publish state event, but
	// status updates after the publishing call inside state log is not inside lock.
	// use atomic to synchronize this number.
	//
	backlogCnt int32
	//
	// caller receives a ticket when getting a workerclient. caller returns workerclient
	// together with the ticket to ensure workerclient is returned by the same caller
	// only once. (key: *WorkerClient, value: ticket string)
	//
	checkoutTickets map[interface{}]string
	//
	// adaptive queue manager to decide on long/short timeouts and saturation recovery.
	//
	aqmanager *adaptiveQueueManager
	// the actual list of workers, indexed by worker ID
	workers []*WorkerClient
}
// Init creates the pool by creating the workers and making all the initializations.
// It spawns the requested number of workers, sets up the checkout-ticket map and
// the adaptive queue manager, and finally starts the background lifespan checker.
//
// It returns the first error encountered while spawning workers or initializing
// the adaptive queue manager; on error the lifespan-checker goroutine is NOT
// started (the original started it unconditionally, even after a failed init).
func (pool *WorkerPool) Init(wType HeraWorkerType, size int, instID int, shardID int, moduleName string) error {
	pool.Type = wType
	pool.activeQ = NewQueue()
	pool.poolCond = sync.NewCond(&sync.Mutex{})
	pool.lifeSpanCheckInterval = GetConfig().lifeSpanCheckInterval
	pool.InstID = instID
	pool.ShardID = shardID
	pool.currentSize = 0
	pool.desiredSize = size
	pool.moduleName = moduleName
	pool.workers = make([]*WorkerClient, size)
	for i := 0; i < size; i++ {
		// spawnWorker launches the process and attaches asynchronously; the
		// worker adds itself to activeQ via WorkerReady once attached.
		if err := pool.spawnWorker(i); err != nil {
			return err
		}
		pool.currentSize++
	}
	pool.checkoutTickets = make(map[interface{}]string)
	pool.aqmanager = &adaptiveQueueManager{}
	if err := pool.aqmanager.init(pool); err != nil {
		return err
	}
	go pool.checkWorkerLifespan()
	return nil
}
// spawnWorker creates the worker client for slot wid, launches the worker
// process, and hands the attach handshake off to a goroutine. Once attached,
// the worker adds itself to the active queue (via WorkerReady).
func (pool *WorkerPool) spawnWorker(wid int) error {
	w := NewWorker(wid, pool.Type, pool.InstID, pool.ShardID, pool.moduleName)
	if err := w.StartWorker(); err != nil {
		if logger.GetLogger().V(logger.Alert) {
			logger.GetLogger().Log(logger.Alert, "failed starting worker: ", err)
		}
		// The caller decides the policy: Init aborts the startup,
		// RestartWorkerPool can retry when workers don't come up.
		return err
	}
	if logger.GetLogger().V(logger.Info) {
		logger.GetLogger().Log(logger.Info, "worker started type ", pool.Type, " id", w.ID, " instid", pool.InstID, " shardid", pool.ShardID)
	}
	// The attach step establishes the UDS connection with the worker; Oracle
	// connect errors surface there, not here.
	go w.AttachToWorker()
	return nil
}
// RestartWorker is called after a worker exited to perform the necessary cleanup and re-start a new worker.
// In the rare situation where the pool needs to be down-sized a new worker is not restarted.
//
// Cleanup: the dead worker's checkout ticket and dispatch registration are
// dropped and it is removed from the active queue. A replacement is spawned in
// the same slot unless the worker's ID is beyond the (shrunk) desired size.
func (pool *WorkerPool) RestartWorker(worker *WorkerClient) (err error) {
	if worker == nil {
		if logger.GetLogger().V(logger.Alert) {
			// log message fixed: it previously said "WorkerReady nil" (copy-paste from WorkerReady)
			logger.GetLogger().Log(logger.Alert, "RestartWorker nil, size=", pool.activeQ.Len(), "type=", pool.Type)
		}
		return nil
	}
	if logger.GetLogger().V(logger.Debug) {
		logger.GetLogger().Log(logger.Debug, "RestartWorker(): ", pool.Type, pool.desiredSize, pool.currentSize, worker.pid, worker.ID)
	}
	pool.poolCond.L.Lock()
	//
	// release terminated workerclient (and fd inside) if we havenot done it yet.
	//
	delete(pool.checkoutTickets, worker)
	pool.aqmanager.unregisterDispatchedWorker(worker)
	if worker.ID >= pool.desiredSize /*we resize by terminating worker with higher ID*/ {
		if logger.GetLogger().V(logger.Verbose) {
			logger.GetLogger().Log(logger.Verbose, "Pool type=", pool.Type, ", worker=", worker.pid, "exited, new one not started because pool was resized:", pool.currentSize, "->", pool.desiredSize)
		}
		pool.currentSize--
		if pool.desiredSize == pool.currentSize {
			//
			// let statlog resets the worker size
			//
			GetStateLog().PublishStateEvent(StateEvent{eType: WorkerResizeEvt, shardID: pool.ShardID, wType: pool.Type, instID: pool.InstID, newWSize: pool.currentSize})
		}
		pool.activeQ.Remove(worker)
		pool.poolCond.L.Unlock()
		return
	}
	pool.activeQ.Remove(worker)
	pool.poolCond.L.Unlock()
	er := pool.spawnWorker(worker.ID)
	//
	// @TODO retry instead of stop the whole startup process
	//
	if er != nil {
		if logger.GetLogger().V(logger.Alert) {
			logger.GetLogger().Log(logger.Alert, "failed starting worker: ", er)
		}
		pool.poolCond.L.Lock()
		pool.currentSize--
		pool.poolCond.L.Unlock()
		return er
	}
	return nil
}
// WorkerReady is called after the worker started and became available. It puts the worker into the internal list
// of workers as well as in the list of available workers.
// A nil worker is logged and ignored. Signal() is issued after releasing the
// lock, waking at most one goroutine waiting in GetWorker's backlog.
func (pool *WorkerPool) WorkerReady(worker *WorkerClient) (err error) {
	if worker == nil {
		if logger.GetLogger().V(logger.Alert) {
			logger.GetLogger().Log(logger.Alert, "WorkerReady nil, size=", pool.activeQ.Len(), "type=", pool.Type)
		}
		return nil
	}
	pool.poolCond.L.Lock()
	if logger.GetLogger().V(logger.Debug) {
		logger.GetLogger().Log(logger.Debug, "Pool::WorkerReady", worker.pid, worker.Type, worker.instID)
	}
	pool.activeQ.Push(worker)
	if logger.GetLogger().V(logger.Debug) {
		logger.GetLogger().Log(logger.Debug, "poolsize(ready)", pool.activeQ.Len(), " type ", pool.Type, " instance ", pool.InstID)
	}
	// record the worker in its fixed slot (indexed by worker ID)
	pool.workers[worker.ID] = worker
	pool.poolCond.L.Unlock()
	//
	// notify one waiting agent on the availability of a new worker in the pool
	//
	pool.poolCond.Signal()
	if logger.GetLogger().V(logger.Debug) {
		// NOTE(review): this Len() read happens outside the lock; debug-only, value may be stale.
		logger.GetLogger().Log(logger.Debug, "poolsize (after signal)", pool.activeQ.Len(), " type ", pool.Type)
	}
	return nil
}
// GetWorker gets the active worker if available. backlog with timeout if not.
//
// @param sqlhash to check for soft eviction against a blacklist of slow queries.
// if getworker needs to exam the incoming sql, there does not seem to be another elegant
// way to do this except to pass in the sqlhash as a parameter.
// @param timeoutMs[0] timeout in milliseconds. default to adaptive queue timeout.
//
// On success the returned ticket must be passed back to ReturnWorker; it proves the
// worker is returned by the same caller exactly once. Error returns seen below:
// ErrRejectDbDown (no healthy worker), ErrSaturationSoftSQLEviction (sql soft-evicted),
// ErrBklgTimeout / ErrBklgEviction (backlog wait expired).
func (pool *WorkerPool) GetWorker(sqlhash int32, timeoutMs ...int) (worker *WorkerClient, t string, err error) {
	if logger.GetLogger().V(logger.Debug) {
		logger.GetLogger().Log(logger.Debug, "Pool::GetWorker(start) type:", pool.Type, ", instance:", pool.InstID, ", active: ", pool.activeQ.Len(), "healthy:", pool.GetHealthyWorkersCount())
	}
	defer func() {
		if logger.GetLogger().V(logger.Debug) {
			logger.GetLogger().Log(logger.Debug, "Pool::GetWorker(end) type:", pool.Type, ", instance:", pool.InstID, ", active: ", pool.activeQ.Len(), "healthy:", pool.GetHealthyWorkersCount())
		}
	}()
	pool.poolCond.L.Lock()
	var workerclient = pool.getActiveWorker()
	for workerclient == nil {
		// with zero healthy workers nothing can ever be dispatched: reject up front.
		if pool.GetHealthyWorkersCount() == 0 {
			msg := fmt.Sprintf("REJECT_DB_DOWN_%s%d", poolNamePrefix[pool.Type], pool.InstID)
			e := cal.NewCalEvent(cal.EventTypeWarning, msg, cal.TransOK, "")
			e.AddDataInt("sql_hash", int64(uint32(sqlhash)))
			e.Completed()
			pool.poolCond.L.Unlock()
			return nil, "", ErrRejectDbDown
		}
		timeout, longTo := pool.aqmanager.getBacklogTimeout()
		// an explicit caller-supplied timeout overrides the adaptive one.
		if len(timeoutMs) > 0 {
			timeout = timeoutMs[0]
		}
		if timeout == 0 {
			// no bklg events!
			pool.poolCond.L.Unlock()
			return nil, "", errors.New("no worker available")
		}
		//
		// check if we need to evict sql with hash=sqlhash.
		//
		if pool.aqmanager.shouldSoftEvict(sqlhash) {
			pool.poolCond.L.Unlock()
			if logger.GetLogger().V(logger.Warning) {
				logger.GetLogger().Log(logger.Warning, "soft sql eviction, sql_hash=", uint32(sqlhash))
			}
			e := cal.NewCalEvent("SOFT_EVICTION", fmt.Sprint(uint32(sqlhash)), cal.TransOK, "")
			e.Completed()
			return nil, "", ErrSaturationSoftSQLEviction
		}
		//
		// c++ has a REJECT_DB_DOWN check which is mostly an attempt to prevent backlog
		// overflow. but bouncer's connection check should have done that already.
		// as a result, we do not implment REJECT_DB_DOWN in golang.
		//
		// client connection can not get an active worker. put it in backlog
		//
		blgsize := atomic.LoadInt32(&(pool.backlogCnt))
		if logger.GetLogger().V(logger.Debug) {
			logger.GetLogger().Log(logger.Debug, "add to backlog. type:", pool.Type, ", instance:", pool.InstID, " timeout:", timeout, ", blgsize:", blgsize)
		}
		if blgsize == 0 {
			// entering an empty backlog: remember when it was last empty.
			pool.aqmanager.lastEmptyTimeMs = (time.Now().UnixNano() / int64(time.Millisecond))
			if logger.GetLogger().V(logger.Debug) {
				logger.GetLogger().Log(logger.Debug, "setlastempty(enter)", pool.aqmanager.lastEmptyTimeMs)
			}
		}
		atomic.AddInt32(&(pool.backlogCnt), 1)
		GetStateLog().PublishStateEvent(StateEvent{eType: ConnStateEvt, shardID: pool.ShardID, wType: pool.Type, instID: pool.InstID, oldCState: Idle, newCState: Backlog})
		//
		// go refused to add a wait timeout https://github.com/golang/go/issues/9578
		// wakeup chann return the time this thread spent in the backlog doghouse.
		//
		wakeupchann := make(chan int64)
		go func(cond *sync.Cond) {
			startTime := time.Now().UnixNano()
			//
			// if caller times out and goes away, we need to unlock after waking up.
			//
			cond.Wait()
			cond.L.Unlock()
			//
			// if backlog times out on this channel, noone will be listening on the other
			// end, which blocks a write to wakeupchann. to avoid dangling thread, use unblocking.
			// if notify wakes up such a thread, other thread in backlog will need another notify
			// to wake up even there is already one free worker in the pool. worker are getting
			// returned consistenly, so we are not worried about this little delay. cond.broadcast
			// may resolve the delay in this corner case, but letting all waiting threads to race
			// for one free worker, with one winner and the rest loopin back into wait is the cost.
			//
			select {
			case wakeupchann <- ((time.Now().UnixNano() - startTime) / int64(time.Millisecond)):
			default:
			}
			close(wakeupchann)
		}(pool.poolCond)
		select {
		case <-time.After(time.Millisecond * time.Duration(timeout)):
			//
			// lock to protect accessing clearAllEvictedSqlhash
			//
			pool.poolCond.L.Lock()
			pool.resetIfLastBacklogEntry("timeout")
			pool.decBacklogCnt()
			pool.poolCond.L.Unlock()
			//
			// backlog timeout. change connstate to idle, and return error.
			// caller will close client connection that takes connstate
			// further from idle to close.
			//
			GetStateLog().PublishStateEvent(StateEvent{eType: ConnStateEvt, shardID: pool.ShardID, wType: pool.Type, instID: pool.InstID, oldCState: Backlog, newCState: Idle})
			//
			// log a backlog timeout event.
			//
			msg := fmt.Sprintf("timeout %d no idle child & req %s%d backlog timed out, close client connection", timeout, poolNamePrefix[pool.Type], pool.InstID)
			var ename string
			if longTo {
				if GetConfig().EnableSharding {
					ename = fmt.Sprintf("%s%d_shd%d_timeout", bcklgTimeoutEvtPrefix[pool.Type], pool.InstID, pool.ShardID)
				} else {
					ename = fmt.Sprintf("%s%d_timeout", bcklgTimeoutEvtPrefix[pool.Type], pool.InstID)
				}
			} else {
				if GetConfig().EnableSharding {
					ename = fmt.Sprintf("%s%d_shd%d_eviction", bcklgTimeoutEvtPrefix[pool.Type], pool.InstID, pool.ShardID)
				} else {
					ename = fmt.Sprintf("%s%d_eviction", bcklgTimeoutEvtPrefix[pool.Type], pool.InstID)
				}
			}
			e := cal.NewCalEvent(cal.EventTypeWarning, ename, cal.TransOK, msg)
			e.Completed()
			if logger.GetLogger().V(logger.Debug) {
				logger.GetLogger().Log(logger.Debug, "backlog timeout. type:", pool.Type, ", instance:", pool.InstID)
			}
			//
			// we are bailing out. but the waiting routine is still sleeping.
			//
			pool.poolCond.Signal() // try to jostle the waiting routine free
			if longTo {
				return nil, "", ErrBklgTimeout
			}
			return nil, "", ErrBklgEviction
		case sleepingtime, _ := <-wakeupchann:
			pool.poolCond.L.Lock() // relock after wakeup routine unlocks on its exit
			//
			// log a backlog wakeup event.
			//
			var etype string
			if GetConfig().EnableSharding {
				etype = fmt.Sprintf("%s%d_shd%d", bcklgEvtPrefix[pool.Type], pool.InstID, pool.ShardID)
			} else {
				etype = fmt.Sprintf("%s%d", bcklgEvtPrefix[pool.Type], pool.InstID)
			}
			if longTo {
				etype += "_long"
			} else {
				etype += "_short"
			}
			ename := fmt.Sprintf("%d", (sleepingtime / GetConfig().BacklogTimeoutUnit))
			e := cal.NewCalEvent(etype, ename, cal.TransOK, strconv.Itoa(int(sleepingtime)))
			e.Completed()
			if logger.GetLogger().V(logger.Debug) {
				logger.GetLogger().Log(logger.Debug, "exiting backlog. type:", pool.Type, ", instance:", pool.InstID)
			}
			workerclient = pool.getActiveWorker()
			//
			// we still have the lock. if there are other connections also woke up but lost the
			// race to acquire the lock, backlog stats still have them counted.
			// if backlog count is 1
			//   if workerclient != nil, we are the only one racing and will be the last one
			//   exiting backlog
			//   if workerclient == nil, we did not win the race and are going back to backlog
			//
			if workerclient != nil {
				pool.resetIfLastBacklogEntry("acquire")
			}
			//
			// reduce even if workerclient is null since we add to blgcnt going back to the top.
			//
			pool.decBacklogCnt()
			//
			// a connection was waken up from backlog, reset backlog and idle count.
			// it is possible some other thread races ahead and get the worker
			// just being returned. if that happens, we get a nil worker and return
			// back into backlog at the top of the for loop.
			//
			GetStateLog().PublishStateEvent(StateEvent{eType: ConnStateEvt, shardID: pool.ShardID, wType: pool.Type, instID: pool.InstID, oldCState: Backlog, newCState: Idle})
		}
	}
	// a worker is in hand; issue a one-time checkout ticket for it.
	ticket := fmt.Sprintf("%d", rand.Uint64())
	//
	// error causes coordinator to disconnect external client
	//
	if pool.aqmanager.alreadyDispatched(ticket, workerclient) {
		msg := fmt.Sprintf("pid=%d;pooltype=%d", workerclient.pid, pool.Type)
		e := cal.NewCalEvent(cal.EventTypeWarning, "double_dispatch", cal.TransOK, msg)
		e.Completed()
		pool.poolCond.L.Unlock()
		return nil, "", errors.New("double_dispatch")
	}
	pool.checkoutTickets[workerclient] = ticket
	pool.aqmanager.registerDispatchedWorker(ticket, workerclient)
	pool.poolCond.L.Unlock()
	GetStateLog().PublishStateEvent(StateEvent{eType: ConnStateEvt, shardID: pool.ShardID, wType: pool.Type, instID: pool.InstID, oldCState: Idle, newCState: Assign})
	// another best effort, in case ReturnWorker() lost the race
	wchLen := len(workerclient.channel())
	// drain the channel if data late
	if wchLen > 0 {
		workerclient.DrainResponseChannel(0 /*no wait to minimize the latency*/)
	}
	return workerclient, ticket, nil
}
// ReturnWorker puts the worker back into the list of available workers. It is usually called after a
// coordinator used it for requests and no longer needs it.
// If the pool is about to be downsized, the worker is terminated instead of being put in the available list.
// If the worker lifetime has expired, the worker is likewise terminated instead of being put in the available list.
// Returns an error (and drops the worker) when the caller's checkout ticket does not match.
func (pool *WorkerPool) ReturnWorker(worker *WorkerClient, ticket string) (err error) {
	now := time.Now().Unix()
	//
	// has to lock before checking QUCE. otherwise, we check and pass QUCE, someone else lock,
	// we block, someone else set QUCE to prevent worker return, someone else unlock, we lock,
	// we already passed QUCE and return worker by mistake.
	//
	pool.poolCond.L.Lock()
	// A nil worker or one already quiesced (wsQuce) must not re-enter the pool.
	if (worker == nil) || (worker.Status == wsQuce) {
		if logger.GetLogger().V(logger.Alert) {
			logger.GetLogger().Log(logger.Alert, "trying to return an invalid worker (bailing), size=", pool.activeQ.Len(), "type=", pool.Type, ", instance:", pool.InstID)
		}
		pool.poolCond.L.Unlock()
		return nil
	}
	if logger.GetLogger().V(logger.Debug) {
		logger.GetLogger().Log(logger.Debug, "Pool::ReturnWorker(start)", worker.pid, worker.Type, worker.instID, "healthy:", pool.GetHealthyWorkersCount())
	}
	// The ticket must be the one handed out when this worker was dispatched; a mismatch
	// indicates a double return or a return by the wrong coordinator.
	if (len(ticket) == 0) || (pool.checkoutTickets[worker] != ticket) {
		msg := fmt.Sprintf("pid=%d;pooltype=%d", worker.pid, pool.Type)
		e := cal.NewCalEvent(cal.EventTypeWarning, "rtrn_worker_using_wrong_ticket", cal.TransOK, msg)
		e.Completed()
		pool.poolCond.L.Unlock()
		return errors.New("returning a worker using wrong ticket")
	}
	delete(pool.checkoutTickets, worker)
	pool.aqmanager.unregisterDispatchedWorker(worker)
	// Best effort: flush any response data the coordinator left unread before the
	// worker can be handed to someone else.
	if (worker.channel() != nil) && (len(worker.channel()) > 0) {
		e := cal.NewCalEvent(cal.EventTypeWarning, "rtrn_worker_having_unprocessed_msg", cal.TransOK, strconv.Itoa(len(worker.channel())))
		e.Completed()
		worker.DrainResponseChannel(time.Microsecond * 10)
	}
	worker.setState(wsAcpt)
	// Pool is shrinking and this worker's ID falls outside the new size: terminate it
	// instead of returning it to the free list.
	if (pool.desiredSize < pool.currentSize) && (worker.ID >= pool.desiredSize) {
		go func(w *WorkerClient) {
			if logger.GetLogger().V(logger.Info) {
				logger.GetLogger().Log(logger.Info, "Pool resized, terminate worker: pid =", worker.pid, ", pool_type =", worker.Type, ", inst =", worker.instID)
			}
			w.Terminate()
		}(worker)
		//pool.currentSize-- // restartworker actually does the size reduction.
		pool.poolCond.L.Unlock()
		return nil
	}
	// check for the lifespan
	if (worker.exitTime != 0) && (worker.exitTime <= now) {
		//
		// reset exit time to prevent checkWorkerLifespan from terminating this worker again.
		//
		worker.exitTime = 0
		go func(w *WorkerClient) {
			if logger.GetLogger().V(logger.Info) {
				logger.GetLogger().Log(logger.Info, "Lifespan exceeded, terminate worker: pid =", worker.pid, ", pool_type =", worker.Type, ", inst =", worker.instID)
			}
			w.Terminate()
		}(worker)
		pool.poolCond.L.Unlock()
		return nil
	}
	//
	// check for max requests which can change at runtime.
	//
	// A new configured limit is only picked up when it moved outside the +/-25% jitter
	// band around the worker's current limit; the new limit is re-jittered so workers
	// do not all recycle at once.
	maxReqs := GetMaxRequestsPerChild()
	if (maxReqs > (worker.maxReqCount + worker.maxReqCount/4)) ||
		(maxReqs < (worker.maxReqCount - worker.maxReqCount/4)) {
		if maxReqs >= 4 {
			worker.maxReqCount = maxReqs - uint32(rand.Intn(int(maxReqs/4)))
		}
		if logger.GetLogger().V(logger.Info) {
			logger.GetLogger().Log(logger.Info, "Max requests change pickedup pid =", worker.pid, "cnt", worker.reqCount, "max", worker.maxReqCount)
		}
	}
	// maxReqCount == 0 means recycling by request count is disabled.
	if worker.maxReqCount != 0 {
		//worker.reqCount++ // count in dorequest for each statement instead of for each session.
		if worker.reqCount >= worker.maxReqCount {
			go func(w *WorkerClient) {
				if logger.GetLogger().V(logger.Info) {
					logger.GetLogger().Log(logger.Info, "Max requests exceeded, terminate worker: pid =", worker.pid, ", pool_type =", worker.Type, ", inst =", worker.instID, "cnt", worker.reqCount, "max", worker.maxReqCount)
				}
				w.Terminate()
			}(worker)
			pool.poolCond.L.Unlock()
			return nil
		}
	}
	// LIFO scheduling reuses the most recently returned worker first; FIFO rotates.
	var pstatus = false
	if GetConfig().LifoScheduler {
		pstatus = pool.activeQ.PushFront(worker)
	} else {
		pstatus = pool.activeQ.Push(worker)
	}
	blgsize := atomic.LoadInt32(&(pool.backlogCnt))
	if logger.GetLogger().V(logger.Debug) {
		logger.GetLogger().Log(logger.Debug, "poolsize (after return)", pool.activeQ.Len(), " type ", pool.Type, ", instance:", pool.InstID, ", pushstatus:", pstatus, ", bklg:", blgsize, worker.pid)
	}
	pool.poolCond.L.Unlock()
	//
	// notify one waiting agent on the availability of a new worker in the pool
	//
	pool.poolCond.Signal()
	if logger.GetLogger().V(logger.Debug) {
		logger.GetLogger().Log(logger.Debug, "Pool::ReturnWorker(end after signal)", pool.activeQ.Len(), " type ", pool.Type, "healthy:", pool.GetHealthyWorkersCount(), worker.pid)
	}
	return nil
}
// getActiveWorker scans the free queue for a worker whose status is past
// initialization and returns it, rotating not-yet-ready workers to the back
// of the queue. Returns nil when no ready worker exists.
// The caller must hold the pool lock.
func (pool *WorkerPool) getActiveWorker() (worker *WorkerClient) {
	for remaining := pool.activeQ.Len(); remaining > 0; remaining-- {
		if logger.GetLogger().V(logger.Debug) {
			logger.GetLogger().Log(logger.Debug, "poolsize (before get)", pool.activeQ.Len(), " type ", pool.Type, ", instance:", pool.InstID)
		}
		candidate := pool.activeQ.Poll().(*WorkerClient)
		if candidate.Status <= wsInit {
			// Not ready yet; put it at the end of the queue and keep scanning.
			pool.activeQ.Push(candidate)
			continue
		}
		if logger.GetLogger().V(logger.Debug) {
			logger.GetLogger().Log(logger.Debug, "Pool::SelectWorker", candidate.pid, candidate.Type, pool.InstID)
		}
		return candidate
	}
	return nil
}
// Resize resizes the worker pool when the corresponding dynamic configuration changes.
// When the size is increased, the increase is immediate by spawning the necessary number of new workers.
// When the size is decreased, it removes the workers whose id is bigger than the new size. If
// the workers to be removed are free, they are terminated immediately, otherwise the termination is delayed
// until the worker eventually calls ReturnWorker to make itself available.
func (pool *WorkerPool) Resize(newSize int) {
	if logger.GetLogger().V(logger.Verbose) {
		logger.GetLogger().Log(logger.Verbose, "Resizing pool:", pool.Type, pool.currentSize, "->", newSize)
	}
	pool.poolCond.L.Lock()
	defer pool.poolCond.L.Unlock()
	// No-op when the requested size is already the target size.
	if newSize == pool.desiredSize {
		return
	}
	pool.desiredSize = newSize
	if pool.desiredSize > pool.currentSize {
		// worker increase
		//
		// Grow the workers slice to the new size, preserving the existing entries.
		workers := make([]*WorkerClient, pool.desiredSize)
		copy(workers, pool.workers)
		pool.workers = workers
		// let statlog resets the worker size
		//
		GetStateLog().PublishStateEvent(StateEvent{eType: WorkerResizeEvt, shardID: pool.ShardID, wType: pool.Type, instID: pool.InstID, newWSize: newSize})
		for i := pool.currentSize; i < newSize; i++ {
			worker := NewWorker(i, pool.Type, pool.InstID, pool.ShardID, pool.moduleName)
			er := worker.StartWorker()
			if er != nil {
				if logger.GetLogger().V(logger.Alert) {
					logger.GetLogger().Log(logger.Alert, "failed starting worker: ", er)
				}
				// NOTE(review): returning here leaves pool.currentSize at its old value even
				// though workers started in earlier iterations of this loop are already running
				// and recorded in pool.workers — confirm a partial resize failure is recovered
				// elsewhere (e.g. by a retry of Resize or by worker restart logic).
				return
			}
			go worker.AttachToWorker()
		}
		pool.currentSize = pool.desiredSize
	} else {
		// remove the idle/free workers now. workers not free with ID > pool.desiredSize are terminated in ReturnWorker
		remove := func(item interface{}) bool {
			worker := item.(*WorkerClient)
			if worker.ID >= pool.desiredSize {
				// run in go routine so it doesn't block
				go func(w *WorkerClient) {
					if logger.GetLogger().V(logger.Info) {
						logger.GetLogger().Log(logger.Info, "Pool resized, terminate worker: pid =", worker.pid, ", pool_type =", worker.Type, ", inst =", worker.instID)
					}
					w.Terminate()
				}(worker)
				return true
			}
			return false
		}
		pool.activeQ.ForEachRemove(remove)
	}
}
// Healthy reports whether at least 20% of the pool's workers currently hold a
// live database connection.
func (pool *WorkerPool) Healthy() bool {
	// Snapshot the size under the pool lock; the healthy counter is atomic.
	pool.poolCond.L.Lock()
	poolSize := pool.currentSize
	pool.poolCond.L.Unlock()
	healthy := atomic.LoadInt32(&pool.numHealthyWorkers)
	if logger.GetLogger().V(logger.Verbose) {
		logger.GetLogger().Log(logger.Verbose, "Healthy check pool type =", pool.Type, ", id =", pool.InstID, ", healthy = ", healthy, ", size =", poolSize)
	}
	return healthy*100 >= int32(poolSize)*20
}
// IncHealthyWorkers increments the count of workers connected to the database.
func (pool *WorkerPool) IncHealthyWorkers() {
	atomic.AddInt32(&pool.numHealthyWorkers, 1)
}
// DecHealthyWorkers decrements the count of workers connected to the database.
func (pool *WorkerPool) DecHealthyWorkers() {
	atomic.AddInt32(&pool.numHealthyWorkers, -1)
}
// GetHealthyWorkersCount returns the number of workers connected to the database.
func (pool *WorkerPool) GetHealthyWorkersCount() int32 {
	return atomic.LoadInt32(&pool.numHealthyWorkers)
}
// RacMaint is called when rac maintenance is needed. It marks the workers for restart, spreading
// the restarts across an interval in order to avoid a connection storm to the database.
func (pool *WorkerPool) RacMaint(racReq racAct) {
	if logger.GetLogger().V(logger.Info) {
		logger.GetLogger().Log(logger.Info, "Rac maint processing, shard =", pool.ShardID, ", inst =", racReq.instID, ", time=", racReq.tm)
	}
	now := time.Now().Unix()
	window := GetConfig().RacRestartWindow
	dbUname := ""
	cnt := 0
	pool.poolCond.L.Lock()
	for i := 0; i < pool.currentSize; i++ {
		// Mark a worker only when it matches the requested RAC instance (instID 0
		// means every instance) and it was started before the request timestamp.
		if (pool.workers[i] != nil) && (racReq.instID == 0 || pool.workers[i].racID == racReq.instID) && (pool.workers[i].startTime < int64(racReq.tm)) {
			statusTime := now
			// requested time is in the past, restart starts from now
			// requested time is in the future, set restart time starting from it
			if now < int64(racReq.tm) {
				statusTime = int64(racReq.tm)
			}
			if racReq.delay {
				// Spread exit times across the restart window proportionally to the
				// worker index so workers do not reconnect all at once.
				pool.workers[i].exitTime = statusTime + int64(window*i/pool.currentSize)
			} else {
				pool.workers[i].exitTime = statusTime
			}
			if logger.GetLogger().V(logger.Verbose) {
				logger.GetLogger().Log(logger.Verbose, "Rac maint activating, worker", i, pool.workers[i].pid, "exittime=", pool.workers[i].exitTime, now, window, pool.currentSize)
			}
			cnt++
			// Remember the first worker's DB username for the CAL events below.
			if len(dbUname) == 0 {
				dbUname = pool.workers[i].dbUname
			}
		}
	}
	pool.poolCond.L.Unlock()
	// TODO: C++ worker logs one event for each worker, in the worker, so
	// we keep the same. Think about changing it
	for i := 0; i < cnt; i++ {
		evt := cal.NewCalEvent("RAC_ID", fmt.Sprintf("%d", racReq.instID), cal.TransOK, "")
		evt.Completed()
		evt = cal.NewCalEvent("DB_UNAME", dbUname, cal.TransOK, "")
		evt.Completed()
	}
}
// checkWorkerLifespan runs forever (one iteration per lifeSpanCheckInterval seconds),
// terminating any worker whose lifetime (exitTime) has expired. Free workers are
// terminated immediately; dispatched workers are optionally reclaimed by the
// dangling-worker recovery scan, which runs once every 90 passes.
func (pool *WorkerPool) checkWorkerLifespan() {
	var skipcnt uint32
	var cutofftime uint32
	for {
		// The dangling-worker cutoff is recomputed only once every 90 passes
		// (skipcnt wraps to 0), matching the skipcnt == 0 gate below.
		if skipcnt < 90 {
			skipcnt = skipcnt + 1
		} else {
			skipcnt = 0
			//
			// bigger one
			//
			idleto := uint32(GetTrIdleTimeoutMs())
			dummy := uint32(GetIdleTimeoutMs())
			if dummy > idleto {
				idleto = dummy
			}
			//
			// terminate worker if it stays dispatched more than 3 times the idle timeout ago.
			//
			idleto = 3 * idleto
			//
			// worker.sqlStartTimeMs is measured since the start of mux.
			//
			muxnow := uint32((time.Now().UnixNano() - GetStateLog().GetStartTime()) / int64(time.Millisecond))
			// NOTE(review): this unsigned subtraction wraps when muxnow < idleto (early in
			// the mux lifetime), making cutofftime huge so that stime < cutofftime holds for
			// any dispatched worker — confirm this scan cannot fire before muxnow exceeds
			// three times the idle timeout.
			cutofftime = muxnow - idleto
		}
		var workers []*WorkerClient
		now := time.Now().Unix()
		pool.poolCond.L.Lock()
		for i := 0; i < pool.currentSize; i++ {
			if (pool.workers[i] != nil) && (pool.workers[i].exitTime != 0) && (pool.workers[i].exitTime <= now) {
				// A successful Remove means the worker was idle (still in the free
				// queue), so it can be terminated right away.
				if pool.activeQ.Remove(pool.workers[i]) {
					workers = append(workers, pool.workers[i])
					//
					// reset exit time to prevent return worker from terminating this worker again.
					//
					pool.workers[i].exitTime = 0
				} else {
					if GetConfig().EnableDanglingWorkerRecovery {
						//
						// if disabled, ignore (the worker is in use, it will be checked when freed)
						// otherwise check every 15 min to see if worker.sqlstarttimems is greater
						// than 3 x idletimeout. catch dangling worker not returned by coordinator
						//
						if skipcnt == 0 {
							stime := atomic.LoadUint32(&(pool.workers[i].sqlStartTimeMs))
							//
							// could be worker is dispatched but coordinator has not set stime yet.
							//
							if stime != 0 {
								if stime < cutofftime {
									workers = append(workers, pool.workers[i])
									pool.workers[i].exitTime = 0
									evt := cal.NewCalEvent(cal.EventTypeWarning, "terminate_dispatched_worker", cal.TransOK, fmt.Sprintf("%d", pool.workers[i].pid))
									evt.Completed()
								}
							}
						}
					}
				}
			}
		}
		pool.poolCond.L.Unlock()
		// Terminate the collected workers outside the pool lock.
		for _, w := range workers {
			if logger.GetLogger().V(logger.Info) {
				logger.GetLogger().Log(logger.Info, "checkworkerlifespan - Lifespan exceeded, terminate worker: pid =", w.pid, ", pool_type =", w.Type, ", inst =", w.instID)
			}
			w.Terminate()
		}
		time.Sleep(time.Duration(pool.lifeSpanCheckInterval) * time.Second)
	}
}
// resetIfLastBacklogEntry emits a backlog-drain CAL "QUEUE" event and resets the
// empty-backlog bookkeeping when the backlog is about to become empty (i.e. its
// current count is exactly 1). loc identifies the caller for logging.
func (pool *WorkerPool) resetIfLastBacklogEntry(loc string) {
	backlog := atomic.LoadInt32(&pool.backlogCnt)
	if logger.GetLogger().V(logger.Debug) {
		logger.GetLogger().Log(logger.Debug, "resetIfLastBacklogEntry blgsize", backlog, loc)
	}
	if backlog != 1 {
		return
	}
	now := time.Now().UnixNano() / int64(time.Millisecond)
	duration := now - pool.aqmanager.lastEmptyTimeMs
	// Event name carries the pool type prefix, instance and (if sharded) shard id.
	ename := fmt.Sprintf("aq%s%d", bcklgTimeoutEvtPrefix[pool.Type], pool.InstID)
	if GetConfig().EnableSharding {
		ename = fmt.Sprintf("aq%s%d_shd%d", bcklgTimeoutEvtPrefix[pool.Type], pool.InstID, pool.ShardID)
	}
	evt := cal.NewCalEvent("QUEUE", ename, cal.TransOK, fmt.Sprintf("%d", duration))
	evt.AddDataStr("stime", fmt.Sprintf("%d&etime=%d %s", pool.aqmanager.lastEmptyTimeMs, now, loc))
	evt.Completed()
	pool.aqmanager.lastEmptyTimeMs = now
	pool.aqmanager.clearAllEvictedSqlhash()
	if logger.GetLogger().V(logger.Debug) {
		logger.GetLogger().Log(logger.Debug, "setlastempty(exit)", loc, pool.aqmanager.lastEmptyTimeMs)
	}
}
// decBacklogCnt atomically decrements the backlog counter. The previous version
// checked the counter with LoadInt32 and then decremented with AddInt32 as two
// separate atomic operations, so two concurrent callers could both pass the
// check on a count of 1 and drive the counter negative without detection.
// Decrementing first and inspecting the result closes that window: any caller
// that observes a negative result logs the anomaly and clamps the counter to 0.
func (pool *WorkerPool) decBacklogCnt() {
	if atomic.AddInt32(&(pool.backlogCnt), -1) < 0 {
		logger.GetLogger().Log(logger.Warning, "invalid backlogCnt (acquire)")
		e := cal.NewCalEvent(cal.EventTypeWarning, "negative bcklgCnt", cal.TransOK, "")
		e.Completed()
		// Best-effort repair; the counter should never legitimately be negative.
		atomic.StoreInt32(&(pool.backlogCnt), 0)
	}
}
|
class ComputeReadParameters:
    """A class that computes read parameters for slides in a tensorflow dataset.

    An instance of class histomics_stream.dsm.wsi.ComputeReadParameters can be supplied as an argument
    to tensorflow.dataset.map.  It computes level, factor, width, and height from the inputs filename,
    magnification, and tolerance, and adds the newly computed values to the tensorflow dictionary as
    new key-value pairs.  Ideally the implementation would be all tf.function (i.e., a tensorflow
    graph function); however, much of the code is via a tensorflow py_function because our current
    implementation for discerning the size of an image without reading in the pixel values uses
    non-tensorflow packages, such as openslide.

    Notes
    -----
    Note that the __init__ method cannot be decorated with @tf.function for reasons that are not clear,
    but might (or might not!) be because an instance of a class (returned by the __init__ method) is not
    a tensorflow object.
    """

    def __init__(self, tolerance=tf.constant(0.02, dtype=tf.float32)):
        # Relative tolerance used when matching the requested magnification against the
        # magnifications natively present in the image pyramid.
        self.tolerance = tolerance

    @tf.function
    def __call__(self, elem):
        """Compute level, factor, width, and height for one dataset element and
        return the element with those keys added."""
        level, factor, width, height = tf.py_function(
            func=self._py_compute_read_parameters,
            inp=[elem["filename"], elem["magnification"], self.tolerance],
            Tout=(tf.int32, tf.float32, tf.int32, tf.int32),
        )
        response = {
            **elem,
            "level": level,
            "factor": factor,
            "width": width,
            "height": height,
        }
        return response

    def _py_compute_read_parameters(self, filename_in, magnification_in, tolerance_in):
        """Eager (py_function) implementation that does the actual work of this class.

        Returns the pyramid level to read, the residual scaling factor to apply after
        reading that level, and the slide width and height at that level.
        """
        filename = filename_in.numpy().decode("utf-8")
        magnification = magnification_in.numpy()
        tolerance = tolerance_in.numpy()

        if re.compile(r"\.svs$").search(filename):
            # read whole-slide image file and create openslide object
            os_obj = os.OpenSlide(filename)
            # measure objective of level 0
            objective = np.float32(os_obj.properties[os.PROPERTY_NAME_OBJECTIVE_POWER])
            # calculate magnifications of levels
            estimated = np.array(objective / os_obj.level_downsamples)
            # Find best level to use and its factor
            level, factor = self._get_level_and_factor(magnification, estimated, tolerance)
            # get slide width, height at desired magnification. (Note width before height)
            width, height = os_obj.level_dimensions[level]
        elif re.compile(r"\.zarr$").search(filename):
            # read whole-slide image and create zarr objects
            store = zarr.DirectoryStore(filename)
            source_group = zarr.open(store, mode="r")
            # measure objective of level 0; the zarr attrs reuse the openslide key name
            objective = np.float32(source_group.attrs[os.PROPERTY_NAME_OBJECTIVE_POWER])
            # calculate magnifications of levels
            estimated = np.array(objective / source_group.attrs["level_downsamples"])
            # Find best level to use and its factor
            level, factor = self._get_level_and_factor(magnification, estimated, tolerance)
            # get slide width, height at desired magnification. (Note height before width)
            height, width = source_group[format(level)].shape[0:2]
        else:
            # We don't know magnifications so assume reasonable values for level and factor.
            level = 0
            factor = 1.0
            # Opening the image is currently the only way we have to learn its size.
            pil_obj = Image.open(filename)
            width, height = pil_obj.size
        return level, factor, width, height

    def _get_level_and_factor(self, magnification, estimated, tolerance):
        """Choose the pyramid level and residual scaling factor for the requested
        magnification.

        If some level's magnification is within `tolerance` (relative) of the request,
        that level is used directly with factor 1.0.  Otherwise the closest level whose
        magnification exceeds the request is chosen, and `factor` scales its output down
        to the requested magnification.  Raises ValueError when the request exceeds the
        base (scan) magnification.
        """
        # calculate difference with magnification levels
        delta = magnification - estimated
        # match to existing levels
        if np.min(np.abs(np.divide(delta, magnification))) < tolerance:  # match
            level = np.squeeze(np.argmin(np.abs(delta)))
            factor = 1.0
        elif np.any(delta < 0):
            # nearest level with magnification above the request
            value = np.max(delta[delta < 0])
            level = np.squeeze(np.argwhere(delta == value)[0])
            factor = magnification / estimated[level]
        else:  # desired magnification above base level - throw error
            raise ValueError("Cannot interpolate above scan magnification.")
        return level, factor
|
/**
 * Sets the upper 3x3 of this matrix by multiplying m1 with m2: this = m1 x m2.
 * All nine products are accumulated into local temporaries before any field of
 * this matrix is written, so the call remains correct when this aliases m1 or m2.
 * @throws NullPointerException if m1 or m2 are null
 */
private final void _mult3(Mat4d m1, Mat4d m2) {
	// Row 0 of the product.
	final double r00 = m1.d00 * m2.d00 + m1.d01 * m2.d10 + m1.d02 * m2.d20;
	final double r01 = m1.d00 * m2.d01 + m1.d01 * m2.d11 + m1.d02 * m2.d21;
	final double r02 = m1.d00 * m2.d02 + m1.d01 * m2.d12 + m1.d02 * m2.d22;
	// Row 1 of the product.
	final double r10 = m1.d10 * m2.d00 + m1.d11 * m2.d10 + m1.d12 * m2.d20;
	final double r11 = m1.d10 * m2.d01 + m1.d11 * m2.d11 + m1.d12 * m2.d21;
	final double r12 = m1.d10 * m2.d02 + m1.d11 * m2.d12 + m1.d12 * m2.d22;
	// Row 2 of the product.
	final double r20 = m1.d20 * m2.d00 + m1.d21 * m2.d10 + m1.d22 * m2.d20;
	final double r21 = m1.d20 * m2.d01 + m1.d21 * m2.d11 + m1.d22 * m2.d21;
	final double r22 = m1.d20 * m2.d02 + m1.d21 * m2.d12 + m1.d22 * m2.d22;
	// Publish only after every entry has been computed.
	this.d00 = r00; this.d01 = r01; this.d02 = r02;
	this.d10 = r10; this.d11 = r11; this.d12 = r12;
	this.d20 = r20; this.d21 = r21; this.d22 = r22;
}
|
<reponame>jreidmke/floss
// import Link from 'next/link';
// import { Button } from '../button/Button';
// import { CTABanner } from '../cta/CTABanner';
import { Carousel } from 'react-responsive-carousel';
import { Section } from '../layout/Section';
// import ReactDOM from 'react-dom';
import 'react-responsive-carousel/lib/styles/carousel.min.css'; // requires a loader
const React = require('react');
const Banner = () => (
<Section>
{/* <CTABanner
title="Testimonials"
subtitle="Start your Free Trial."
button={
<Link href="https://creativedesignsguru.com/category/nextjs/">
<a>
<Button>Get Started</Button>
</a>
</Link>
}
/> */}
<Carousel
showThumbs={Boolean(false)}
showArrows={Boolean(true)}
infiniteLoop={Boolean(true)}
autoPlay={Boolean(true)}
transitionTime={Number(500)}
>
<div className="text-center flex flex-col p-4 sm:text-left sm:flex-col sm:items-center sm:justify-between sm:p-12 bg-primary-100 rounded-md">
<p className="text-center text-lg leading-9">
"Thanks floss! Floss is great. I like floss more and more each
day because it makes my life a lot easier."
</p>
<p className="mt-4 font-extrabold"><NAME>.</p>
<img
className="object-scale-down h-28 mt-4"
src="/assets/images/person1.jpg"
/>
</div>
<div className="text-center flex flex-col p-4 sm:text-left sm:flex-col sm:items-center sm:justify-between sm:p-12 bg-pink-200 rounded-md">
<p className="text-center text-lg leading-9">
"I am so pleased with this product. The service was excellent.
Floss was the best investment I ever made."
</p>
<p className="mt-4 font-extrabold"><NAME>.</p>
<img
className="object-scale-down h-28 mt-4"
src="/assets/images/person2.jpg"
/>
</div>
<div className="text-center flex flex-col p-4 sm:text-left sm:flex-col sm:items-center sm:justify-between sm:p-12 bg-primary-100 rounded-md">
<p className="text-center text-lg leading-9">
"What a great product. I will let my mum know about this, she
could really make use of floss!"
</p>
<p className="mt-4 font-extrabold"><NAME>.</p>
<img
className="rounded-md object-scale-down h-28 mt-4"
src="/assets/images/person3.jpg"
/>
</div>
</Carousel>
</Section>
);
export { Banner };
|
Industrial corn flour enrichment with whole amaranth flour and milling fractions in corn-based products.
Whole flour and milling fractions of raw amaranth seeds were used in 90:10, 80:20 and 50:50 mixtures with industrialized corn flour (MINSA) to prepare tortillas and arepas, basic nutritional foods in several Latin American countries. The three corn-amaranth mixtures showed a good protein and fat content as well as amino acid profile, and presented adequate physical characteristics for making tortillas. Amaranth whole flour and commercial corn flour mixtures in the proportion of 80:20 and 50:50 were found suitable for the preparation of arepas. Protein and fat content were substantially improved, with no changes in organoleptic characteristics. The Mexican type of Amaranthus cruentus, selected due to its availability and bromatologic properties, yielded products of excellent nutritional quality, according to their amino acid content and protein efficiency ratio (PER). The protein-rich (1R) and starchy (2-R) fractions obtained by air classification, also yielded good results when substituting amaranth flour. The afore-mentioned flours and air-classified fractions of the 50:50 mixtures proved to be adequate in gruel preparations when used in 1:8 and 1:12 dilutions, as they improved their organoleptic characteristics. Flakes and extrudates were also used, yielding products with a 13.3-15% protein content, 1.7-3.7% fat, and 65.2-74.2% carbohydrates. In addition, extrudates were utilized to prepare snacks of better nutritional quality than existing similar commercial products. Improvement of the tortilla's mineral and fatty acid contents was achieved in every case. Enrichment of this product with whole amaranth flour is, therefore, recommendable for use in programs aimed at improving the nutritional status of the population.
|
Fixed‐parameter decidability: Extending parameterized complexity analysis
We extend the reach of fixed‐parameter analysis by introducing classes of parameterized sets defined based on decidability instead of complexity. Known results in computability theory can be expressed in the language of fixed‐parameter analysis, making use of the landscape of these new classes. On the one hand this unifies results that would not otherwise show their kinship, while on the other it allows for further exchange of insights between complexity theory and computability theory. In the landscape of our fixed‐parameter decidability classes, we recover part of the classification of real numbers according to their computability. From this, using the structural properties of the landscape, we get a new proof of the existence of P‐selective bi‐immune sets. Furthermore, we show that parameter values in parameterized sets in our uniformly fixed‐parameter decidability classes interact with both instance complexity and Kolmogorov complexity. By deriving a parameter based upper bound on instance complexity, we demonstrate how parameters convey a sense of randomness. Motivated by the instance complexity conjecture, we show that the upper bound on the instance complexity is infinitely often also an upper bound on the Kolmogorov complexity.
Introduction
In the 1990s, four papers of Downey and Fellows enriched the field of computational complexity with parameterized complexity. In particular, their class, FPT, of fixed-parameter tractable sets became a popular alternative to the long-standing representative of tractability, the class P. The addition of parameters enables new ways of using limits and recursive definitions. Of particular interest are properties of elements that are preserved in the limit. For such properties, parameterized reasoning is especially suitable.
As it turns out, parameterized analysis was present in computability theory long before it found its way to complexity theory, despite the strong ties between both areas. However, for computability theory, a parameterized framework was never explicitly constructed. We shall make a start in filling this gap by showing how some classical results in computability theory can be neatly phrased in a context with parameters.
It will become clear that a naive formulation of decidability in the presence of parameters, fixed-parameter decidability, wherein sets are included if the subsets corresponding to fixed parameter values are decidable, is too broad a concept to be meaningful. For example, any set for which the subsets for fixed parameter values are finite or cofinite would be considered fixed-parameter decidable. We shall explore two ways of augmenting the above definition in order to make fixed-parameter decidability applicable to settings of interest. The first is by posing additional uniformity conditions, which we do in § 3. The second is by confining uniformity into oracles, which we do in § 4. Wrapping nonuniformity in oracles inspires a novel type of fixed-parameter reducibility, which is of interest not only for fixed-parameter decidability, but also takes a natural place in the fixed-parameter tractability landscape.
Applications of fixed-parameter decidability to domains other than fixed-parameter tractability are given in § § 5 & 6. A link with the computability of real numbers is shown in § 5. There, we also use the structural arrangement of fixed-parameter decidability classes in a novel proof of the existence of a P-selective bi-immune set. In § 6, typical measures of randomness are connected to the landscape of fixed-parameter decidability classes.
Parameter values are shown to be related to both Kolmogorov complexity and instance complexity. This sheds new light on the instance complexity conjecture.
Preliminaries
In this text, we shall use a binary alphabet 2 = {0, 1} and denote by 2 + the set of finite non-empty binary strings. The set of natural numbers will be denoted by N. Central to the discussion will be the notion of a parameterized set.
Definition 2.1 A parameterized set is a subset of $2^+ \times \mathbb{N}$. Given a parameter $k \in \mathbb{N}$, the $k$th slice of a parameterized set $A$ is the set $A_k = \{x \in 2^+ \mid (x, k) \in A\}$. A parameterized set $A$ is convergent if we have: (1) $\forall x \in 2^+\ \exists k_0\ \forall k \geq k_0 : x \in A_k \Leftrightarrow x \in A_{k_0}$. A convergent parameterized set $A$ is said to converge to the unique limit $A_\infty = \{x \in 2^+ \mid x \in A_k \text{ for all sufficiently large } k\}$ extending (1).
Throughout this text, a set named A will be a parameterized set, whereas a set named B will be an arbitrary set.
Parameterized sets that are not convergent can be interesting. In fact, the theory we shall develop in § 4 makes very little use of convergence. Nevertheless, fixed-parameter analysis is normally involved with convergent sets. Moreover, the implied convergence when considering fixed-parameter tractability is often of a particularly strong kind.
Definition 2.2 A parameterized set
A is monotonically non-decreasing, or simply monotonic, if, for all $k \in \mathbb{N}$, we have $A_k \subseteq A_{k+1}$. Every monotonic parameterized set $A$ is convergent and it converges to the set $A_\infty = \bigcup_k A_k$.
The alternative definition of a parameterized set that is used by Flum and Grohe internalizes monotonicity and represents a kind of modulus of convergence. As a result, it poses a substantial restriction on what sets can be considered fixed-parameter tractable. Since most of the current work would not be possible with a monotonicity requirement in the definition of a parameterized set, we argue that the definition of Flum and Grohe is overly restrictive, even when focussing on computational complexity.
We shall use the standard notion of computability derived from accepting the Turing machine as a model for effective computation. Familiarity with Turing machines and oracle Turing machines is assumed. For a short overview of the relevant definitions, we refer to and . Notational conventions are mostly taken from , in particular, the set of halting Turing machines, as encoded in 2 + , is denoted by ∅ , the Turing jump of the empty set.
As is common, we shall define computable partial functions by associating, through coding, the input and output of a Turing machine with arguments and function values, respectively. Here, we leave the function value undefined if the Turing machine does not halt. Thus the class of Turing machines corresponds to the class of computable partial functions. A computable partial function is simply a computable function if it is nowhere undefined. We also define some related notions, which have to do with deciding the membership question of elements of sets.
Definition 2.3
A Turing machine is a partial decision procedure for a given set B if we have, for all x: Note that no requirements are in place for the situation where, given x as input, does not halt or outputs anything other than 0 or 1. A set B for which a partial decision procedure exists that outputs 1 on all members of B, in other words, a procedure that correctly identifies the members of B, is called semidecidable. Semidecidable sets are also known as computably enumerable sets or recursively enumerable sets. A partial decision procedure for a www.mlq-journal.org set B that always halts is called a consistent procedure with respect to B. Such a procedure can be interpreted as one that, when asked whether some x is a member of B answers 'yes', 'no', or 'unresolved'. Further strengthening the definition by requiring that the output of is always in 2, in other words, forbidding the 'unresolved' answer, yields the definition of a decision procedure. Sets for which a decision procedure exist are called decidable.
Turning to oracle Turing machines instead of plain Turing machines, we obtain familiar reducibility notions.
Definition 2.4 An oracle Turing machine is a Turing reduction from a set B to a set H if we have, for all x: Note that is not required to halt on any input x when equipped with an oracle different from H . Moreover, it is permitted to make queries to the oracle indefinitely. Restricting the latter, demanding that makes only finitely many queries to the oracle, regardless the input, we get the definition of a weak truth-table reduction. Restricting the former, demanding that halts on any input, regardless the oracle, we get the definition of a truth-table reduction. Indeed, a truth-table reduction is also a weak truth-table reduction. We can interpret the finitely many queries a truth-table reduction makes to the oracle on a given input x as variables in a truth table, where the operational value of any given assignment to the variables in that truth table is the output of the reduction. Hence the name truth-table reduction.
Uniform and strongly uniform fixed-parameter decidability
A parameterized study of decidability was started already in 1965 , some 30 years before a parameterized investigation of computational complexity took off. Using the new terminology of fixed-parameter tractability , we can recast several old results into a unified framework. In particular, this opens the door for crossbreeding between parameterized decidability and computability research. One immediate result is that we observe that the most general definitions of fixed-parameter tractability classes make no mention of any convergence criteria, whereas such criteria are part of all parameterized approaches to decidability from the onset. It appears to us that the tractability classes too would benefit from a convergence requirement. For our definition of uniform fixed-parameter decidability, we choose to restrict to convergent parameterized sets. Definition 3.1 A convergent parameterized set $A$ is uniformly fixed-parameter decidable if there is a computable function $\phi : 2^+ \times \mathbb{N} \to 2$ such that $\phi(x, k) = 1$ if we have $x \in A_k$ and $\phi(x, k) = 0$ otherwise.
The set is strongly uniformly fixed-parameter decidable if there is an additional computable bounding function $g : 2^+ \to \mathbb{N}$ such that, for all $x \in 2^+$, we have $|\{k \mid \phi(x, k) \neq \phi(x, k + 1)\}| \leq g(x)$.
The classes of uniformly and strongly uniformly fixed-parameter decidable sets are designated by uFPD and suFPD respectively.
Note that a convergent parameterized set is uniformly fixed-parameter decidable precisely when it is decidable. However, decidability of the set is independent of decidability of its limit. Partial application of some $\phi$ as in the above definition yields decision procedures for the slices of a parameterized set $A$, but this does not extend to $A_\infty$. For monotonic sets, though, fixed-parameter decidability is related to semidecidability of the limit. Lemma 3.2 For any set $B$, the following are equivalent: 1. There exists a monotonic parameterized set in suFPD that converges to B. 2. The set B is semidecidable.
P r o o f . 1 =⇒ 2. We specify a partial decision procedure for B that halts precisely on the members of B, based on a decision procedure of the monotonic parameterized set. On input x, start iterating over possible values of k until one is found such that we have (x, k) = 1. If one is eventually found, halt and output 1. Otherwise, by necessity, we keep computing forever. www.mlq-journal.org 2 =⇒ 1. We construct a monotonic parameterized set via its decision procedure, using the partial decision procedure ψ of B. On input (x, k), we simulate the computation of ψ(x) for k steps. If ψ halts within k steps, we output accordingly. Otherwise, we output 0.
As monotonic parameterized sets in suFPD are members of suFPD where the bounding function is the constant function yielding 1 (note that the converse is not true), Lemma 3.2 inspires a structure inside uFPD extending semidecidability. The class of semidecidable sets is closed under countable unions, provided the indices of the constituents of the union form a semidecidable set. The same does not go for countable intersections or difference. Already for a finite collection of semidecidable sets, the symmetric difference need not be semidecidable. Closing the semidecidable sets under an increasing number of applications of symmetric difference, we obtain the finite levels of Ershov's difference hierarchy, which stems back to the 1960s . To be precise, the members of the nth level of the difference hierarchy are the sets that can be written as the symmetric difference of n semidecidable sets. We recover this part of the hierarchy within our framework.
Lemma 3.3 The levels of the difference hierarchy coincide with the classes of limits of parameterized sets in suFPD for which the bounding function is constant and the first slice is empty.
The last requirement, the first slice being empty, is of little importance. Dropping it gives us the weak levels of the difference hierarchy , which are positioned so that the nth level of the weak difference hierarchy is between the nth and the (n + 1)th level of the difference hierarchy. From a parameterized view on decidability and with Lemma 3.2 at hand, the above lemma is as much a definition of the difference hierarchy as it is a lemma in its own right . Already in the standard proofs of the strictness of the inclusions in the difference hierarchy we can see the parameterized approach at work. It is possible to extend the difference hierarchy to infinite computable ordinals , but of interest in the context of fixed-parameter decidability is only level ω, which neatly coincides with the class of limits of parameterized sets in suFPD. Within suFPD, a fine grained structure exists driven by unbounded computable functions .
Lemma 3.4 Let $f$ and $g$ be computable bounding functions with $g(x) > f(x)$ for infinitely many $x$. Then there is a set that occurs as the limit of a parameterized set in suFPD with bounding function g that does not occur as the limit of any parameterized set in suFPD with bounding function f .
The above lemma decomposes the class of limits of sets in suFPD into a distributive lattice of subclasses based on bounding functions. This lattice lacks a greatest element as there is no pointwise greatest computable function. Nevertheless, suFPD does emerge as a closure of a certain set with respect to a type of reductions .
Theorem 3.5 A set truth-table reduces to ∅ if and only if it is the limit of some parameterized set in suFPD.
P r o o f . =⇒. Note that parameterized sets converging to ∅ exist in suFPD. Now, when, on input (x, k), we evaluate the truth-table corresponding to x on slice k of some chosen parameterized set converging to ∅ , we obtain a parameterized set in suFPD that converges to the desired set.
⇐=. Given a decision function of a parameterized set in suFPD, we can effectively construct a computable partial function that is defined precisely for those x and c for which By repeated queries to the halting set, we can determine the least c for which such a function does not halt for a given x. Based on this information we can decide membership in the limit of the parameterized set that we began with. In the case of suFPD the number of queries necessary is bounded, hence a truth-table reduction to the halting set, ∅ , suffices.
Regarding Theorem 3.5 it is worth noting that when it comes to reducibility to the halting set, truth-table reducibility is equivalent to weak truth-table reducibility .
A theorem similar to Theorem 3.5, but applicable to uFPD instead of suFPD, was proven already in 1959 and is known as the limit lemma .
Theorem 3.6 A set Turing reduces to ∅ if and only if it is the limit of some parameterized set in uFPD.
P r o o f . =⇒. A similar approach as in Theorem 3.5 is applicable, but we have to limit the running time of the computation to ensure we end up with a terminating computation. Limiting by the parameter value k is sufficient as from some k on, the queried elements of ∅ will be correctly enumerated and in particular only finitely many queries will be made before the procedure terminates within k steps. From that k on, membership of x will be www.mlq-journal.org settled correctly. The decision in case of an aborted computation is irrelevant, as only finitely many values of k will lead to an aborted computation on a given x.
⇐=. The same proof as in Theorem 3.5 works. For the present case, there need not be a computable bound on the number of queries necessary, but it will still be finite for every x.
We note that the limits of sets in uFPD thus form the limit of the infinite Ershov hierarchy . The class of limits of sets in uFPD is closed under countable symmetric difference, given the indices of the constituents of the symmetric difference form a semidecidable set and the symmetric difference is well-defined. Furthermore, by Post's theorem, this class equals the level 0 2 of the arithmetic hierarchy . Because the truth-table degree of the halting set is strictly smaller than the Turing degree of the halting set, we conclude from Theorem 3.5 and Theorem 3.6 the strict inclusion suFPD ⊂ uFPD . This inclusion holds in a strong sense, namely that there are parameterized sets in uFPD for which the limit is not the limit of any parameterized set in suFPD.
With respect to complexity classes of parameterized sets, we observe an absence of theorems such as Theorem 3.5 or Theorem 3.6. We have not only characterized members of our fixed-parameter decidability classes based on their internal structure, but also based on properties of their limits. A similar characterization cannot separate fixed-parameter complexity classes including but not limited to FPT and the levels of the W-hierarchy . In fact, we can place bounds on the running time already for uFPD, comparable to what we did in the left-to-right part of Theorem 3.6.
Lemma 3.7
For every parameterized set in uFPD there is a parameterized set in uFPD with the same limit and with a decision procedure of which the running time depends linearly on the parameter and on nothing else.
P r o o f . Let A be a parameterized set in uFPD and a decision procedure for A. Define a procedure with the desired properties as follows. On input (x, k), spend k steps in total on simulating (x, j) for each j ∈ N in ascending order. After this, return the decision of the last completed computation, or 0 if no simulation finished.
For too small k, it might not even be possible to fully read x, but for every x there is some, possibly huge, value of k with which there is enough time to compute the limit decision. Thus is the decision procedure of a parameterized set in uFPD with the same limit as A. By construction, the running time of depends only on k.
We shall call a parameterized sets with a decision procedure of which the running time depends solely on the parameter slowly convergent. Note that similar lemmas can be stated for other resources, such as space, and for resource dependence complying with constructible functions of the parameter.
Combined, Theorem 3.6 and Lemma 3.7 give a characterization of the limits of members of fixed-parameter complexity classes.
Corollary 3.8 A set Turing reduces to ∅ if and only if it is the limit of some parameterized set in any fixed-parameter complexity class.
This can be interpreted either as an indication of the inherent importance of the particular ways a parameterized set converges to its limit, or as a motivation for further refinement of the definitions of the fixed-parameter tractability classes.
Relativized fixed-parameter decidability
Following terminology of fixed-parameter tractability classes , the naive version of fixed-parameter decidability put forward in the introduction should be called nonuniform fixed parameter decidability.
Definition 4.1 A parameterized set $A$ is nonuniformly fixed-parameter decidable if, for every $k \in \mathbb{N}$, the slice $A_k$ is decidable. The class of nonuniformly fixed-parameter decidable sets is designated by nuFPD.
From a convergence point of view, nonuniform fixed-parameter tractability is not interesting because every set can occur as the limit of a parameterized set in nuFPD. An aspect of the study of parameterized sets that is of interest when looking at nuFPD is kernelizability. In the realm of fixed-parameter tractability, kernelizations are www.mlq-journal.org a fruitful research topic. For our nonuniform fixed-parameter decidability, a definition similar to the complexity theoretic one is possible.
Definition 4.2 A parameterized set
A is kernelizable if for every k ∈ N there is a computable function f k : 2 + → 2 + × N and a constant c k such that the following hold for all x ∈ 2 + : Note that we do not require either f k or c k to be computable from k. While this definition is a natural variation on its fixed-parameter tractability counterpart, we know of no natural examples of kernelizability in our nonuniform fixed-parameter decidability context. Yet, the fundamental theorem of kernelizability in fixed-parameter tractability carries over to our context.
Theorem 4.3 A parameterized set is in nuFPD if and only if it is kernelizable.
P r o o f . =⇒. We shall give a construction of f k for arbitrary k. On input x, use the decision procedure of A k to settle the membership question of x in A k . Next, output (y, k), where y is the least element of 2 + that has the same membership of A k as x. Observe that f k is computable given the decision procedure of A k and that its range consists of at most two elements, so | f k (x)| is bounded by a constant.
⇐=. We consider the case for an arbitrary k. The requirement | f k (x)| ≤ c k implies that the range of f k is finite. By hardcoding which of those finitely many elements in the range of f k belong to A, we obtain, from f k , a decision procedure for A k .
The above proof hinges on the fact that all finite sets are decidable. Indeed, the definition of nuFPD allows for undecidable collections of decidable sets. Ideally, we want to have a handle on the amount of nonuniformity required for deciding the slices of a parameterized set and nuFPD is too permissive for that. In the previous section, we addressed this permissiveness by imposing uniformity constraints. A gentler treatment is possible by controlling the amount of nonuniformity accepted. This is particularly relevant when studying preservation of any structure captured in a parameter under reductions from one parameterized set to another. Although the dominant type of reduction in fixed-parameter tractability research, the fpt-reduction, is a many-one reduction, we feel a more general reduction is more natural in the context of fixed-parameter decidability. Note, though, that the analog of the fpt-reduction for our fixed-parameter decidability classes works precisely as one would expect. The more natural reduction we propose for a study of fixed-parameter decidability where we can control nonuniformity is based on oracle Turing machines.
Definition 4.4 A parameterized set
A is fixed-parameter reducible to a parameterized set H if the following hold: 1. There is an oracle Turing machine $\Psi$ that decides A when given H as oracle.
2. There is a computable function g : N → N such that on input (x, k), no elements of slices of H above the g(k)th are queried by $\Psi$ when given H as oracle.
In that case, we write A FP H .
This reducibility notion defines a preorder on the parameterized sets and we obtain decidability classes as subclasses in the class of parameterized sets that are closed under this preorder. The principal relativized fixedparameter decidability classes are then defined with respect to a single oracle that, accordingly, becomes complete for the defined class.
Definition 4.5 A parameterized set $A$ is in $\mathrm{FPD}^H$ if we have $A \leq_{\mathrm{FP}} H$.
When we maximally limit the power of the oracle, we obtain FPD ∅ . This class is essentially just uFPD, without the requirement of convergence. Moreover, we can replace ∅ by any decidable parameterized set. Consequently, FPD ∅ is the class of decidable parameterized sets and in particular it is closed with respect to fixed-parameter reducibility. It also follows that, when we restrict our attention to convergent parameterized sets, the class uFPD is closed with respect to fixed-parameter reducibility. This does not extend to suFPD, as it cannot be distinguished www.mlq-journal.org from uFPD, which is the smallest non-empty class of convergent parameterized sets definable from FP , by fixed-parameter reducibility.
Our biggest class, nuFPD, is also closed under fixed-parameter reducibility. This can be shown similar to Theorem 4.3, but instead of hardcoding membership of elements, we hardcode finitely many decision procedures for slices. We observe though, that every permutation of some infinite set of decidable sets gives rise to a different parameterized set in nuFPD, hence nuFPD is uncountable and cannot have a complete parameterized set. Notably, this means that there is no parameterized set H for which we get nuFPD = FPD H . Whereas FPD ∅ coincides with a minimal fixed-parameter degree, nuFPD cannot be written as a countable union of fixed-parameter degrees.
When we limit fixed-parameter reducibility to the usual fixed-parameter tractability running time, we get something of a relativized fpt-reducibility that also defines a preorder. To this reducibility notion, oracles picked from FPT provide no additional computational strength. Thus, we find that FPT is closed under this relativized fpt-reducibility. This provides a new characterization of FPT. With this new characterization, we get the potentially easy way to prove membership of FPT by showing relativized fpt-reducibility to a known member of FPT.
Real numbers and parameters
Real numbers can be encoded as subsets of a countable set in more than one way. It turns out that the classification of real numbers according to their computability is often sensitive to the chosen encoding . We shall consider two prominent encodings, Dedekind cuts and binary expansions, and investigate computability notions embodied by our fixed-parameter decidability classes. For both these encodings, a real number is computable if and only if the encoding, the Dedekind cut or binary expansion, is decidable . Sensitivity to the encoding arises only with less restrictive notions of computability of real numbers.
Through any effective encoding of the rational numbers, Q, into 2 + we can interpret parameterized sets as subsets of Q × N. This enables us to look at Dedekind cuts of real numbers that arise as limits of parameterized sets. In particular, for a real number a we are interested in its left cut {q ∈ Q | q ≤ a} occurring as the limit of a parameterized set in one of our fixed-parameter decidability classes. A real number with a semidecidable left cut is called left computable (and also lower semicomputable ) and thus by Lemma 3.2 a real number is left computable if and only if its left cut is the limit of some monotonic parameterized set in suFPD. Real numbers for which either their left cut, or the complement of their left cut is semidecidable are called semicomputable . Alternatively, the semicomputable real numbers can be characterized as those for which there exists either a non-decreasing or a non-increasing computable sequence of rational numbers that converges to them . Exploiting this characterization, we can locate the semicomputable real numbers in our framework.
Theorem 5.1 A real number is semicomputable if and only if its left cut is the limit of some parameterized set in suFPD with constant bounding function 1.
P r o o f . We only need to prove the right-to-left direction. Therefore, we shall show that a parameterized set A as in the theorem converges to the left cut of a real number a that is semicomputable. For that, we may assume the left cut is not itself decidable. This means that at least one of A ∞ \A k and A k \A ∞ has, for all k, an infinite intersection with every non-empty interval around a. We shall assume the former and construct a non-decreasing computable sequence of rational numbers converging to a. For the latter case, a similar construction yields a non-increasing computable sequence of rational numbers converging to a.
Define the sequence (q i ) i∈N starting from q 0 , which we set to some rational number below a, by setting q i+1 to the largest rational number that meets the following requirements.
If no rational number meets these requirements, we set q i+1 equal to q i . Observe that q i+1 is computable, as there are only finitely many candidates for which finitely many checks have to be carried out. Also, (q i ) i∈N is non-decreasing. Furthermore, for every q in A ∞ there is some i such that we have q ≤ q i < a, hence (q i ) i∈N converges to a. This theorem can be extended to arbitrary constant bounding functions , showing that no finite level of the difference hierarchy properly extends the semicomputable real numbers when treated as Dedekind cuts.
In computer science research, the representation of real numbers by means of binary expansions is more traditional than that by means of Dedekind cuts. Without loss of generality, we focus on the real numbers between 0 and 1 and represent them as subsets of N, linking characteristic functions to binary expansions. Note that this representation is not unique for dyadic rationals, but this is a technicality that can often be ignored.
Unfortunately, for binary expansions Theorem 5.1 fails dramatically. Even the union of all finite levels of the difference hierarchy is incomparable to the class of semicomputable real numbers treated as binary expansions . In other words, when working with binary expansions we cannot easily describe the class of semicomputable real numbers in terms of limits of certain sets in suFPD. However, the class of real numbers corresponding to limits of sets in suFPD, with no further restriction on the computable bounding functions, does properly extend the semicomputable real numbers . This class of real numbers is referred to as the class of ω-computably enumerable real numbers . Even more general is the class of real numbers corresponding, using binary expansions, to limits of sets in uFPD. This is the class of computably approximable real numbers , which have also been called limit computable real numbers . When a is a computably approximable real number corresponding to the limit of a parameterized set A in uFPD, the computable reals a k corresponding to the slices A k of A form a sequence (a k ) k∈N approximating a. Such a parameterized set and such an approximation are called normed if the computably reals a k are dyadic rational numbers with denominators at most 2 k .
Lemma 5.2 Every computably approximable real number has a normed approximation.
P r o o f . Let A be the parameterized set corresponding to the computably approximable real number a.
Consider the parameterized set
The slices A k correspond to dyadic rationals with denominators at most 2 k . Furthermore, A is decidable and it converges to the same limit as A, hence A corresponds to a normed approximation of a.
Parameterized sets are a natural environment for the application of the priority method, where sets are built in a countably infinite number of stages . Many of the structural results about our fixed-parameter decidability classes incorporate a priority argument in their proofs somewhere. These arguments are sometimes quite involved. Therefore, linking fixed-parameter decidability to other contexts, such as that of the computability of real numbers, can add powerful tools to those contexts.
As an example of this additional power, we shall proceed by giving a simple proof of a theorem of which the published proof is markedly convoluted. The theorem asserts the existence of a set with two properties, the first of which being P-selectivity .
Definition 5.3
A set B ⊆ 2 + is P-selective if there exists a function f : 2 + × 2 + → 2 + that is computable in polynomial time so that the following hold for all x, y ∈ 2 + : An earlier analogue of this property without a restriction on the running time has been used in the study of reducibilities on semidecidable sets . Versions with a constrained running time, such as our P-selectivity, have been most successfully used in the study of feasibility of algorithms and computational simplicity of sets .
The second property is the more commonplace bi-immunity .
Definition 5.4 A set is bi-immune if neither it nor its complement has an infinite semidecidable subset.
It may appear that P-selectivity is a restrictive property with respect to the computability of a set and, in particular, its subsets. However, while bi-immunity requires a set and its subsets to be far from computable, it is possible to combine the two properties in one set .
Theorem 5.5 There exists a set that is both P-selective and bi-immune.
For a more concise proof of this theorem than the one that was published, we shall make use of the observation that Lemma 3.7 and Lemma 5.2 can be combined. That is, a set is the limit of a parameterized set in uFPD if and only if it is the limit of some slowly convergent normed parameterized set.
www.mlq-journal.org P r o o f . Let A be a slowly convergent normed parameterized set of which the limit is not the limit of a member of suFPD. Such a set exists because the inclusion of suFPD in uFPD is proper in the strong sense we observed in § 3.
Let a be the real number corresponding to the limit of A and let a |z| be the dyadic rational number represented by A |z| . We claim that the set B = {z | a |z| < a} is P-selective and bi-immune.
For P-selectivity, consider the function This function meets the two listed requirements in Definition 5.3. It remains to show that it is computable in polynomial time. Because A is normed, computing f only requires to decide on membership of at most min{|x|, |y|} pairs of elements. Because A is slowly convergent, each pair can be decided within a running time of |x| + |y|. Hence, f witnesses the P-selectivity of B.
For bi-immunity, we first consider infinite semidecidable subsets of B. If there was an infinite semidecidable subset B of B, then from (a |z| ) z∈B we could distill a non-decreasing computable sequence of rational numbers converging to a. Thus, a would be semicomputable, which it cannot be because it is not the limit of any member of suFPD. Conversely, infinite semidecidable subsets of the complement of B would lead to non-increasing computable sequences of rational numbers converging to a. Again, this would contradict our choice of A.
Essentially, the above proof comes down to the observation that computably approximable real numbers with slowly convergent normed approximations exist also outside the ω-computably enumerable real numbers.
We note that the link between parameter values and computable real numbers represented by slices provides an alternative ordering of parameter values. This alternative ordering has more structure than the standard ordering of parameter values. In the above proof of bi-immunity, it is crucial that the alternative order has precisely one accumulation point. This perspective demonstrates the potential for parameters that showcase an additional structure different from N.
Algorithmic randomness and parameters
In isolation, an element x of 2 + may be called random if its length does not significantly exceed the length of any complete description of x. This idea is made formal by using the Kolmogorov complexity, K(x), as a measure of the shortest complete description of x. Loosely speaking, the Kolmogorov complexity is defined as K(x) = min{|M| : M is a Turing machine that takes no input and outputs x}.
By referring to the length of a Turing machine, the Kolmogorov complexity is only defined up to an additive constant depending on the encoding of Turing machines used.
When investigating the computability of sets, we can ask ourselves what elements are responsible for the level of computability a set is in. This intuition of inherent difficulty in deciding membership of certain instances in a set is made precise by the notion of instance complexity. With respect to a set B, the instance complexity of an element x of 2 + is defined as ic(x : B) = min{|M| : M is a consistent procedure with respect to B and M(x) ∈ 2}.
Thus, the instance complexity of an element x with respect to a set B is a measure of the shortest procedure that decides the membership of x in B without making mistakes on other elements. Like Kolmogorov complexity, instance complexity too is only defined up to an additive constant. Still, both notions are rigid enough to be of great use . We remind ourselves of the fact that a consistent procedure was defined in § 2 as a Turing machine that always halts and outputs either 'yes', 'no', or 'unresolved'. Broadening the above definition of instance complexity to allow for partial decision procedures, which are permitted to not halt, we obtain a weaker notion of instance complexity . We shall not use this weaker notion and stick with the definition based on consistent procedures.
For any set B and any element x of 2 + we have, up to an additive constant independent of B and x, the inequality ic(x : B) ≤ K(x) . This bound is witnessed by a procedure that first generates x and then checks its input against x, yielding an embedded membership decision in case of a match and 'unresolved' otherwise. When the set B is decidable, we can do a lot better, since a decision procedure for B is in particular a consistent procedure for B. Hence, the instance complexity of any element with respect to a decidable set is bounded. The converse is also true, as membership of any element is then correctly decided by one or more members of a finite collection of consistent procedures. Still, for sets that are not decidable questions concerning the tightness of the inequality between instance complexity and Kolmogorov complexity have been of great interest . It has been conjectured that with respect to any set that is not decidable the instance complexity is within a fixed additive distance from the Kolmogorov complexity for infinitely many elements of 2 + . Such elements are called hard instances with respect to the set in much the same way elements of which the Kolmogorov complexity is close to the length are called random.
Note that there is a significant gap between the instance complexity with respect to decidable sets and the conjectured instance complexity of infinitely many elements with respect to sets that are not decidable. With respect to decidable sets the instance complexity is bounded, while with respect to sets that are not decidable the instance complexity is conjectured to be close to the Kolmogorov complexity infinitely often. While classes of sets are known for which indeed with respect to any set in the class there are infinitely many hard instances, the conjectured lower bound fails in general. Using a priority argument, a semidecidable but not decidable set has been constructed with respect to which the instance complexity is always less than the logarithm of the Kolmogorov complexity, up to an additive constant . This result is tight in the sense that with respect to any set that is not decidable the instance complexity must infinitely often be at least as large as the logarithm of the Kolmogorov complexity .
Parameterized analysis provides new means of tackling questions related to the existence of hard instances. For parameterized sets A in FPD ∅ , a computable partial function μ can be defined as where the minimum of the empty set is left undefined. This partial function is especially meaningful when A is monotonic. For a monotonic set A that is in FPD ∅ , and thus in suFPD, the domain of the partial function equals A ∞ , which is semidecidable by Lemma 3.2. An immediate relationship between instance complexity and the value of μ is available for monotonic parameterized sets in suFPD. Lemma 6.1 For any monotonic parameterized set A in suFPD, for all x ∈ A ∞ , we have, up to an additive constant independent of x, the inequality ic(x : A ∞ ) ≤ |μ(x : A)|.
P r o o f . Let M be a decision procedure for A. From M we can get decision procedures for the slices of A. If we modify a decision procedure for a slice of A so that it outputs 'unresolved' whenever it would output 0, we obtain a consistent procedure with respect to A ∞ . The specification of such a consistent procedure, for a slice k, is possible within length |k| + c, where c is a constant depending on M. For all x ∈ A ∞ , taking k = μ(x : A), we thus obtain a consistent procedure that halts on x and satisfies the required inequality.
Although with respect to arbitrary undecidable sets the conjectured existence of infinitely many hard instances need not be true, we can prove that the Kolmogorov complexity gets below our μ infinitely often. Together with Lemma 6.1, this bounds the distance between the instance complexity and the Kolmogorov complexity. P r o o f . Observe that for any parameterized set A in FPD ∅ that has infinitely many different slices and any constant n, we can find an x such that we have μ(x : A) ≥ n or even |μ(x : A)| ≥ n. This can be made effective, be it tedious, by means of a computable function f A : N → 2 + so that we have |μ( f A (n) : A)| ≥ n. It follows that there is a constant c depending on f A such that for all n we have K( f A (n)) ≤ |n| + c.
Without loss of generality, we may assume that f A is injective. Now suppose that for almost all n we have K( f A (n)) > |μ( f A (n) : A)|. By construction we would then find, for almost all n, the inequalities n ≤ |μ( f A (n) : A)| < K( f A (n)) ≤ |n| + c, which cannot be true. Hence there are infinitely many n for which we have K( f A (n)) ≤ |μ( f A (n) : A)|, proving the theorem.
Of course, there may be infinitely many x sharing a fixed value under μ, whereas only finitely many of these x could have a Kolmogorov complexity less than this value under μ. As a corollary to the above theorem in www.mlq-journal.org combination with Lemma 6.1, we obtain that if we can find a monotonic parameterized set A in suFPD such that for all x we have that |μ(x : A)| gets within an additive constant of ic(x : A ∞ ), the instance complexity conjecture holds for A ∞ . A direct attempt at applying this corollary would, given an arbitrary set B, look at the monotonic parameterized set With this parameterized set we have, for all x ∈ B, |μ(x : A)| = ic(x : A ∞ ) and A ∞ = B. However, A need not be in suFPD. In order to get a grip on the fixed-parameter decidability of A, consider the auxiliary monotonic parameterized set H = {( , k) | is a consistent procedure with respect to B ∧ | | ≤ |k|}.
This parameterized set is so that A is in FPD H . Additionally, because the slices of H are all finite, we find that A is in nuFPD. Yet, by Rice's theorem , H ∞ is undecidable and by the relationship between | | and |k| in the definition of H this can be extended to H itself being undecidable. Thus FPD H is strictly bigger than FPD ∅ .
Conclusion
We have introduced a number of classes of parameterized sets, defined by decidability properties of their slices. The classes form a chain suFPD ⊂ uFPD ⊂ FPD ∅ ⊂ nuFPD.
To the left of this chain, there is also the class of monotonic sets in suFPD. Inside suFPD is an entire subhierarchy based on bounding functions. Classes of the form FPD H , where H is a parameterized set, exist between FPD ∅ and nuFPD. These classes are inspired by parameterized analysis in complexity theory, chiefly by the class FPT of fixed-parameter tractable sets, and likewise come naturally with a fixed-parameter reducibility notion. Unique to the setting of decidability is the fact that suFPD and uFPD can be characterized by means of reducibility of the limits of convergent parameterized sets to the halting set. For suFPD the reducibility involved is truth-table reducibility or, equivalently, weak truth-table reducibility, whereas for uFPD the specific reducibility notion is Turing reducibility. Fixed-parameter complexity classes cannot be distinguished based on the reducibility of the limits of their members to the halting set. A set that is Turing reducible to the halting set can occur as the limit of a convergent parameterized set in any fixed-parameter complexity class.
For the computability of real numbers, we located some notable classes of real numbers in our parameterized framework. In doing so, we illustrated the dependence of the classification on the encoding of real numbers, considering both Dedekind cuts and binary expansions. Using properties of the above chain of parameterized decidability classes we were able to state a concise proof of a theorem that until now only had a convoluted published proof. This proof demonstrates the ability of parameterized analysis to subsume a tacit priority argument.
Lastly, we have shown that in suFPD, parameters give rise to a computable partial upper bound to the instance complexity of members of limit sets. Moreover, this upper bound is shown to upper bound the Kolmogorov complexity as well, infinitely often. The latter behavior is interesting because the class of parameterized sets for which it holds allows the specific upper bound to be as slowly increasing as any unbounded computable partial function. Similarly, as an upper bound to the instance complexity of members of a limit set, it is possible to exercise control over the tightness of the bound via the choice of a parameterized set. These upper bounds thus constitute a new take on the instance complexity conjecture.
|
# -*- coding: utf-8 -*-
"""
Skyperious main program entrance: launches GUI application or executes command
line interface, handles logging and status calls.
------------------------------------------------------------------------------
This file is part of Skyperious - a Skype database viewer and merger.
Released under the MIT License.
@author <NAME>
@created 26.11.2011
@modified 15.06.2015
------------------------------------------------------------------------------
"""
from __future__ import print_function
import argparse
import atexit
import codecs
import collections
import datetime
import errno
import getpass
import glob
import locale
import io
import itertools
import Queue
import os
import shutil
import sys
import threading
import time
import traceback
import warnings
try:
import wx
is_gui_possible = True
except ImportError:
is_gui_possible = False
try: # For printing to a console from a packaged Windows binary
import win32console
except ImportError:
win32console = None
import conf
import export
import skypedata
import util
import workers
if is_gui_possible:
import guibase
import skyperious
import support
# Command-line interface specification fed to argparse in run(): top-level
# description and arguments, plus one entry per subcommand
# (export, search, merge, diff, gui).
ARGUMENTS = {
    "description": "%s - Skype SQLite database viewer and merger." % conf.Title,
    "arguments": [
        {"args": ["--verbose"], "action": "store_true",
         "help": "print detailed progress messages to stderr"}, ],
    "commands": [
        {"name": "export",
         "help": "export Skype databases as HTML, text or spreadsheet",
         "description": "Export all message history from a Skype database "
                        "into files under a new folder" + (", or a single Excel "
                        "workbook with chats on separate sheets."
                        if export.xlsxwriter else ""),
         "arguments": [
             {"args": ["-t", "--type"], "dest": "type",
              "choices": ["html", "xlsx", "csv", "txt", "xlsx_single"]
                         if export.xlsxwriter else ["html", "csv", "txt"],
              "default": "html", "required": False,
              "help": "export type: HTML files (default), Excel workbooks, "
                      "CSV spreadsheets, text files, or a single Excel "
                      "workbook with separate sheets" if export.xlsxwriter
                      else
                      "export type: HTML files (default), CSV spreadsheets, "
                      "text files", },
             {"args": ["FILE"], "nargs": "+",
              "help": "one or more Skype databases to export", },
             {"args": ["-c", "--chat"], "dest": "chat", "required": False,
              "help": "names of specific chats to export", "nargs": "+"},
             {"args": ["-a", "--author"], "dest": "author", "required": False,
              "help": "names of specific authors whose chats to export",
              "nargs": "+"},
             {"args": ["-p", "--password"], "dest": "ask_password",
              "action": "store_true", "required": False,
              "help": "ask for Skype password on HTML export, "
                      "to download shared images"},
             {"args": ["--verbose"], "action": "store_true",
              "help": "print detailed progress messages to stderr"}, ],
        },
        {"name": "search",
         "help": "search Skype databases for messages or data",
         "description": "Search Skype databases for messages, chat or contact "
                        "information, or table data.",
         "arguments": [
             {"args": ["-t", "--type"], "dest": "type", "required": False,
              "choices": ["message", "contact", "chat", "table"],
              "default": "message",
              "help": "search in message body (default), in contact "
                      "information, in chat title and participants, or in any "
                      "database table", },
             {"args": ["QUERY"],
              "help": "search query, with a Google-like syntax, for example: "
                      "\"this OR that chat:links from:john\". More on syntax "
                      "at https://suurjaak.github.io/Skyperious/help.html. " },
             {"args": ["FILE"], "nargs": "+",
              "help": "Skype database file(s) to search", },
             {"args": ["--verbose"], "action": "store_true",
              "help": "print detailed progress messages to stderr"}, ],
        },
        {"name": "merge", "help": "merge two or more Skype databases "
                                  "into a new database",
         "description": "Merge two or more Skype database files into a new "
                        "database in current directory, with a full combined "
                        "message history. New filename will be generated "
                        "automatically. Last database in the list will "
                        "be used as base for comparison.",
         "arguments": [
             {"args": ["FILE1"], "metavar": "FILE1", "nargs": 1,
              "help": "first Skype database"},
             {"args": ["FILE2"], "metavar": "FILE2", "nargs": "+",
              "help": "more Skype databases"},
             {"args": ["--verbose"], "action": "store_true",
              "help": "print detailed progress messages to stderr"},
             {"args": ["-o", "--output"], "dest": "output", "required": False,
              "help": "Final database filename, auto-generated by default"},
         ]
        },
        {"name": "diff", "help": "compare chat history in two Skype databases",
         "description": "Compare two Skype databases for differences "
                        "in chat history.",
         "arguments": [
             {"args": ["FILE1"], "help": "first Skype database", "nargs": 1},
             {"args": ["FILE2"], "help": "second Skype database", "nargs": 1},
             {"args": ["--verbose"], "action": "store_true",
              "help": "print detailed progress messages to stderr"}, ],
        },
        {"name": "gui",
         "help": "launch Skyperious graphical program (default option)",
         "description": "Launch Skyperious graphical program (default option)",
         "arguments": [
             {"args": ["FILE"], "nargs": "*",
              "help": "Skype database to open on startup, if any"}, ]
        },
    ],
}
# Module-level state shared between the GUI and command-line code paths.
window = None          # Application main window instance
deferred_logs = []     # Log messages cached before main window is available
deferred_status = []   # Last status cached before main window is available
is_cli = False         # Is program running in command-line interface mode
is_verbose = False     # Is command-line interface verbose
def log(text, *args):
    """
    Logs a timestamped message to main window, or to stderr in verbose
    command-line mode, caching it if the window is not yet available.

    @param   args  string format arguments, if any, to substitute in text
    """
    global deferred_logs, is_cli, is_verbose, window
    stamp = datetime.datetime.now()
    if not args:
        body = text
    else:
        try:
            body = text % args
        except UnicodeError:
            # Mixed str/unicode arguments: retry with everything as unicode
            body = text % tuple(map(util.to_unicode, args))
    if "\n" in body:
        # Indent all linebreaks, to group continuation lines under timestamp
        body = body.replace("\n", "\n\t\t")
    msg = "%s.%03d\t%s" % (stamp.strftime("%H:%M:%S"),
                           stamp.microsecond / 1000, body)
    if window:
        process_deferreds()
        wx.PostEvent(window, guibase.LogEvent(text=msg))
        return
    if is_cli and is_verbose:
        sys.stderr.write(msg + "\n")
        sys.stderr.flush()
        return
    deferred_logs.append(msg)
def status(text, *args):
    """
    Sets main window status text, or writes it to stderr in verbose
    command-line mode, caching it if the window is not yet available.

    @param   args  string format arguments, if any, to substitute in text
    """
    global deferred_status, is_cli, is_verbose, window
    if not args:
        msg = text
    else:
        try:
            msg = text % args
        except UnicodeError:
            # Mixed str/unicode arguments: retry with everything as unicode
            msg = text % tuple(map(util.to_unicode, args))
    if window:
        process_deferreds()
        wx.PostEvent(window, guibase.StatusEvent(text=msg))
    elif is_cli and is_verbose:
        sys.stderr.write(msg + "\n")
    else:
        deferred_status[:] = [msg]
def status_flash(text, *args):
    """
    Sets main window status text that will be cleared after a timeout.

    @param   args  string format arguments, if any, to substitute in text
    """
    global deferred_status, window
    try:
        msg = text % args if args else text
    except UnicodeError:
        # Formatting mixed str/unicode can fail: retry with all-unicode args
        args = tuple(map(util.to_unicode, args))
        msg = text % args if args else text
    if window:
        process_deferreds()
        wx.PostEvent(window, guibase.StatusEvent(text=msg))
        def clear_status():
            # Clear only if status has not been overwritten in the meantime
            if window.StatusBar and window.StatusBar.StatusText == msg:
                window.SetStatusText("")
        wx.CallLater(conf.StatusFlashLength, clear_status)
    else:
        # No window yet: cache as the pending status (timeout not applicable)
        deferred_status[:] = [msg]
def logstatus(text, *args):
    """
    Logs a timestamped message to main window and sets main window status text.

    @param   args  string format arguments, if any, to substitute in text
    """
    for forward in (log, status):
        forward(text, *args)
def logstatus_flash(text, *args):
    """
    Logs a timestamped message to main window and sets main window status text
    that will be cleared after a timeout.

    @param   args  string format arguments, if any, to substitute in text
    """
    for forward in (log, status_flash):
        forward(text, *args)
def process_deferreds():
    """
    Forwards log messages and status, cached before main window was available.
    """
    global deferred_logs, deferred_status, window
    if not window:
        return
    # Flush cached log messages in arrival order
    while deferred_logs:
        wx.PostEvent(window, guibase.LogEvent(text=deferred_logs.pop(0)))
    # Forward only the last cached status, then drop the cache
    if deferred_status:
        wx.PostEvent(window, guibase.StatusEvent(text=deferred_status[0]))
        del deferred_status[:]
def run_merge(filenames, output_filename=None):
    """
    Merges all Skype databases to a new database.

    The last database in filenames is used as the base: it is copied to the
    output file, and messages from the other databases are merged into it.

    @param   filenames        Skype database files to merge
    @param   output_filename  name for the merged database,
                              auto-generated from the base name if not given
    """
    dbs = [skypedata.SkypeDatabase(f) for f in filenames]
    db_base = dbs.pop()
    # counts[db]["chats"/"msgs"]: number of chats/messages merged from db
    counts = collections.defaultdict(lambda: collections.defaultdict(int))
    postbacks = Queue.Queue()
    name, ext = os.path.splitext(os.path.split(db_base.filename)[-1])
    now = datetime.datetime.now().strftime("%Y%m%d")
    if not output_filename:
        output_filename = util.unique_path("%s.merged.%s%s" % (name, now, ext))
    output("Creating %s, using %s as base." % (output_filename, db_base))
    bar = ProgressBar()
    bar.start()
    # Work on a copy of the base database, leaving the original untouched
    shutil.copyfile(db_base.filename, output_filename)
    db2 = skypedata.SkypeDatabase(output_filename)
    chats2 = db2.get_conversations()
    db2.get_conversations_stats(chats2)
    args = {"db2": db2, "type": "diff_merge_left"}
    worker = workers.MergeThread(postbacks.put)
    try:
        for db1 in dbs:
            chats = db1.get_conversations()
            db1.get_conversations_stats(chats)
            bar.afterword = " Processing %.*s.." % (30, db1)
            worker.work(dict(args, db1=db1, chats=chats))
            # Drain worker postbacks until merge of this database is done
            while True:
                result = postbacks.get()
                if "error" in result:
                    output("Error merging %s:\n\n%s" % (db1, result["error"]))
                    db1 = None # Signal for global break
                    break # break while True
                if "done" in result:
                    break # break while True
                if "diff" in result:
                    counts[db1]["chats"] += 1
                    counts[db1]["msgs"] += len(result["diff"]["messages"])
                if "index" in result:
                    bar.max = result["count"]
                    bar.update(result["index"])
                if result.get("output"):
                    log(result["output"])
            if not db1:
                break # break for db1 in dbs
            bar.stop()
            bar.afterword = " Processed %s." % db1
            bar.update(bar.max)
            output()
    finally:
        # Ensure worker thread is stopped even on error
        worker and (worker.stop(), worker.join())
    if not counts:
        # Nothing was merged: discard the copied output database
        output("Nothing new to merge.")
        db2.close()
        os.unlink(output_filename)
    else:
        for db1 in dbs:
            output("Merged %s in %s from %s." %
                   (util.plural("message", counts[db1]["msgs"]),
                    util.plural("chat", counts[db1]["chats"]), db1))
        output("Merge into %s complete." % db2)
def run_search(filenames, query):
    """
    Searches the specified databases for specified query.

    @param   filenames  Skype database files to search
    @param   query      search text, with a Google-like query syntax
    """
    dbs = [skypedata.SkypeDatabase(f) for f in filenames]
    # Worker posts progress and result batches to the queue via postbacks.put
    postbacks = Queue.Queue()
    args = {"text": query, "table": "messages", "output": "text"}
    worker = workers.SearchThread(postbacks.put)
    try:
        for db in dbs:
            log("Searching \"%s\" in %s." % (query, db))
            worker.work(dict(args, db=db))
            # Drain worker postbacks until search of this database is done
            while True:
                result = postbacks.get()
                if "error" in result:
                    output("Error searching %s:\n\n%s" %
                           (db, result.get("error_short", result["error"])))
                    break # break while True
                if "done" in result:
                    log("Finished searching for \"%s\" in %s.", query, db)
                    break # break while True
                if result.get("count", 0) or is_verbose:
                    # Prefix output with database name if searching several
                    if len(dbs) > 1:
                        output("%s:" % db, end=" ")
                    output(result["output"])
    finally:
        # Ensure worker thread is stopped even on error
        worker and (worker.stop(), worker.join())
def run_export(filenames, format, chatnames, authornames, ask_password):
    """
    Exports the specified databases in specified format.

    @param   filenames     Skype database files to export
    @param   format        export format, one of "html", "xlsx", "csv", "txt",
                           or "xlsx_single" for all chats in a single workbook
    @param   chatnames     names of specific chats to export, if any
    @param   authornames   names of specific authors whose chats to export
    @param   ask_password  whether to prompt for Skype password on HTML export,
                           for downloading shared images
    """
    dbs = [skypedata.SkypeDatabase(f) for f in filenames]
    is_xlsx_single = ("xlsx_single" == format)
    for db in dbs:
        # HTML export can download shared images from Skype web: needs login
        if (ask_password and db.id and conf.SharedImageAutoDownload
        and format.lower().endswith("html")):
            prompt = "Enter Skype password for '%s': " % db.id
            # Re-prompt until a password is accepted by Skype web
            while not skypedata.SharedImageDownload.has_login(db.id):
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore") # possible GetPassWarning
                    output(prompt, end="") # getpass output can raise errors
                    pw = getpass.getpass("", io.BytesIO())
                if not pw: continue # while
                try:
                    skypedata.SharedImageDownload.login(db.id, pw)
                except Exception as e:
                    log("Error signing in %s on Skype web.\n\n%s",
                        db.id, util.format_exc(e))
                    prompt = "%s\nEnter Skype password for '%s': " % (e, db.id)
        # Build target name from template, falling back to database filename
        formatargs = collections.defaultdict(str)
        formatargs["skypename"] = os.path.basename(db.filename)
        formatargs.update(db.account or {})
        basename = util.safe_filename(conf.ExportDbTemplate % formatargs)
        dbstr = "from %s " % db if len(dbs) != 1 else ""
        if is_xlsx_single:
            # Single workbook: one output file in the current directory
            export_dir = os.getcwd()
            filename = util.unique_path("%s.xlsx" % basename)
        else:
            # One file per chat, under a new folder; filename carries format
            export_dir = util.unique_path(os.path.join(os.getcwd(), basename))
            filename = format
        target = filename if is_xlsx_single else export_dir
        try:
            extras = [("", chatnames)] if chatnames else []
            extras += [(" with authors", authornames)] if authornames else []
            output("Exporting%s%s as %s %sto %s." %
                  (" chats" if extras else "",
                   ",".join("%s like %s" % (x, y) for x, y in extras),
                   format[:4].upper(), dbstr, target))
            chats = sorted(db.get_conversations(chatnames, authornames),
                           key=lambda x: x["title"].lower())
            db.get_conversations_stats(chats)
            bar_total = sum(c["message_count"] for c in chats)
            bartext = " Exporting %.*s.." % (30, db.filename) # Enforce width
            bar = ProgressBar(max=bar_total, afterword=bartext)
            bar.start()
            result = export.export_chats(chats, export_dir, filename, db,
                                         progress=bar.update)
            files, count = result
            bar.stop()
            if count:
                bar.afterword = " Exported %s to %s. " % (db, target)
                bar.update(bar_total)
                output()
                log("Exported %s %sto %s as %s.", util.plural("chat", count),
                    dbstr, target, format)
            else:
                # Nothing exported: remove the created file or folder
                output("\nNo messages to export%s." %
                       ("" if len(dbs) == 1 else " from %s" % db))
                os.unlink(filename) if is_xlsx_single else os.rmdir(export_dir)
        except Exception as e:
            output("Error exporting chats: %s\n\n%s" %
                   (e, traceback.format_exc()))
def run_diff(filename1, filename2):
    """
    Compares the first database for changes with the second.

    @param   filename1  Skype database file scanned for differences
    @param   filename2  Skype database file compared against
    """
    if os.path.realpath(filename1) == os.path.realpath(filename2):
        output("Error: cannot compare %s with itself." % filename1)
        return
    db1, db2 = map(skypedata.SkypeDatabase, [filename1, filename2])
    # counts[db]["chats"/"msgs"]: number of differing chats/messages found
    counts = collections.defaultdict(lambda: collections.defaultdict(int))
    postbacks = Queue.Queue()
    bar_text = "%.*s.." % (50, " Scanning %s vs %s" % (db1, db2))
    bar = ProgressBar(afterword=bar_text)
    bar.start()
    chats1, chats2 = db1.get_conversations(), db2.get_conversations()
    db1.get_conversations_stats(chats1), db2.get_conversations_stats(chats2)
    args = {"db1": db1, "db2": db2, "chats": chats1, "type": "diff_left"}
    worker = workers.MergeThread(postbacks.put)
    try:
        worker.work(args)
        # Drain worker postbacks until the scan is done
        while True:
            result = postbacks.get()
            if "error" in result:
                output("Error scanning %s and %s:\n\n%s" %
                       (db1, db2, result["error"]))
                break # break while True
            if "done" in result:
                break # break while True
            if "chats" in result and result["chats"]:
                counts[db1]["chats"] += 1
                msgs = len(result["chats"][0]["diff"]["messages"])
                msgs_text = util.plural("new message", msgs)
                contacts_text = util.plural("new participant",
                                result["chats"][0]["diff"]["participants"])
                # Show latest differing chat title and counts in progress bar
                text = ", ".join(filter(None, [msgs_text, contacts_text]))
                bar.afterword = (" %s, %s." % (result["chats"][0]["chat"]["title"],
                                 text))
                counts[db1]["msgs"] += msgs
            if "index" in result:
                bar.max = result["count"]
                bar.update(result["index"])
            if result.get("output"):
                log(result["output"])
    finally:
        # Ensure worker thread is stopped even on error
        worker and (worker.stop(), worker.join())
    bar.stop()
    bar.afterword = " Scanned %s and %s." % (db1, db2)
    bar.update(bar.max)
    output()
def run_gui(filenames):
    """
    Main GUI program entrance.

    @param   filenames  Skype database files to open on startup, if any
    """
    global deferred_logs, deferred_status, window
    # Values in some threads would otherwise not be the same
    sys.modules["main"].deferred_logs = deferred_logs
    sys.modules["main"].deferred_status = deferred_status

    # Create application main window
    app = wx.App(redirect=True) # stdout and stderr redirected to wx popup
    window = sys.modules["main"].window = skyperious.MainWindow()
    app.SetTopWindow(window) # stdout/stderr popup closes with MainWindow

    # Decorate write to catch printed errors
    try: sys.stdout.write = support.reporting_write(sys.stdout.write)
    except Exception: pass

    # Some debugging support: preload modules in the embedded console
    window.run_console("import datetime, os, re, time, sys, wx")
    window.run_console("# All %s modules:" % conf.Title)
    window.run_console("import conf, controls, emoticons, export, guibase, "
                       "images, main, searchparser, skypedata, skyperious, "
                       "support, templates, util, wordcloud, workers, "
                       "wx_accel")
    window.run_console("self = main.window # Application main window instance")

    log("Started application on %s.", datetime.date.today())
    # Queue existing files for opening once the event loop is running
    for f in filter(os.path.isfile, filenames):
        wx.CallAfter(wx.PostEvent, window, skyperious.OpenDatabaseEvent(file=f))
    app.MainLoop()
def run(nogui=False):
    """Parses command-line arguments and either runs GUI, or a CLI action.

    @param   nogui  if True, never launch the GUI even for the "gui" command
                    (exits with a help message instead)
    """
    global is_cli, is_gui_possible, is_verbose
    # When running as a frozen binary or under pythonw.exe there is no
    # console by default; ConsoleWriter attaches to or creates one on demand.
    if (getattr(sys, 'frozen', False) # Binary application
    or sys.executable.lower().endswith("pythonw.exe")):
        sys.stdout = ConsoleWriter(sys.stdout) # Hooks for attaching to
        sys.stderr = ConsoleWriter(sys.stderr) # a text console
    if "main" not in sys.modules: # E.g. setuptools install, calling main.run
        srcdir = os.path.abspath(os.path.dirname(__file__))
        if srcdir not in sys.path: sys.path.append(srcdir)
        sys.modules["main"] = __import__("main")

    # Build the argument parser and one sub-parser per command,
    # all driven by the ARGUMENTS data structure.
    argparser = argparse.ArgumentParser(description=ARGUMENTS["description"])
    for arg in ARGUMENTS["arguments"]:
        argparser.add_argument(*arg.pop("args"), **arg)
    subparsers = argparser.add_subparsers(dest="command")
    for cmd in ARGUMENTS["commands"]:
        kwargs = dict((k, cmd[k]) for k in cmd if k in ["help", "description"])
        subparser = subparsers.add_parser(cmd["name"], **kwargs)
        for arg in cmd["arguments"]:
            kwargs = dict((k, arg[k]) for k in arg if k != "args")
            subparser.add_argument(*arg["args"], **kwargs)

    if "nt" == os.name: # Fix Unicode arguments, otherwise converted to ?
        sys.argv[:] = win32_unicode_argv()
    argv = sys.argv[1:]
    # No command given, or a bare .db file path: default to the "gui" command
    if not argv or (argv[0] not in subparsers.choices
    and argv[0].endswith(".db")):
        argv[:0] = ["gui"] # argparse hack: force default argument
    if argv[0] in ("-h", "--help") and len(argv) > 1:
        argv[:2] = argv[:2][::-1] # Swap "-h option" to "option -h"
    arguments = argparser.parse_args(argv)

    # The "merge" command takes two file lists; combine them into FILE
    if hasattr(arguments, "FILE1") and hasattr(arguments, "FILE2"):
        arguments.FILE1 = [util.to_unicode(f) for f in arguments.FILE1]
        arguments.FILE2 = [util.to_unicode(f) for f in arguments.FILE2]
        arguments.FILE = arguments.FILE1 + arguments.FILE2
    if arguments.FILE: # Expand wildcards to actual filenames
        arguments.FILE = sum([glob.glob(f) if "*" in f else [f]
                              for f in arguments.FILE], [])
        arguments.FILE = sorted(set(util.to_unicode(f) for f in arguments.FILE))

    if "gui" == arguments.command and (nogui or not is_gui_possible):
        # GUI requested but unavailable: show help and exit
        argparser.print_help()
        status = None
        if not nogui: status = ("\n\nwxPython not found. %s graphical program "
                                "will not run." % conf.Title)
        sys.exit(status)
    elif "gui" != arguments.command:
        # CLI mode: load configuration and wrap streams so Unicode output
        # cannot raise encoding errors on the console.
        conf.load()
        is_cli = sys.modules["main"].is_cli = True
        is_verbose = sys.modules["main"].is_verbose = arguments.verbose
        # Avoid Unicode errors when printing to console.
        enc = sys.stdout.encoding or locale.getpreferredencoding() or "utf-8"
        sys.stdout = codecs.getwriter(enc)(sys.stdout, "xmlcharrefreplace")
        sys.stderr = codecs.getwriter(enc)(sys.stderr, "xmlcharrefreplace")
    # Dispatch to the selected action
    if "diff" == arguments.command:
        run_diff(*arguments.FILE)
    elif "merge" == arguments.command:
        run_merge(arguments.FILE, arguments.output)
    elif "export" == arguments.command:
        run_export(arguments.FILE, arguments.type, arguments.chat,
                   arguments.author, arguments.ask_password)
    elif "search" == arguments.command:
        run_search(arguments.FILE, arguments.QUERY)
    elif "gui" == arguments.command:
        run_gui(arguments.FILE)
def run_cli():
    """Runs program in command-line interface mode, never launching the GUI."""
    run(nogui=True)
class ConsoleWriter(object):
    """
    Wrapper for sys.stdout/stderr, attaches to the parent console or creates
    a new command console, usable from python.exe, pythonw.exe or
    compiled binary. Hooks application exit to wait for final user input.
    """
    handle = None # note: class variables, shared by stdout and stderr wrappers
    is_loaded = False # whether console attach/create has been attempted
    realwrite = None # the underlying write function once a console exists

    def __init__(self, stream):
        """
        @param   stream  sys.stdout or sys.stderr
        """
        self.encoding = getattr(stream, "encoding", locale.getpreferredencoding())
        self.stream = stream

    def flush(self):
        # Flush the original stream if no console was obtained,
        # else flush the console handle if it supports flushing.
        if not ConsoleWriter.handle and ConsoleWriter.is_loaded:
            self.stream.flush()
        elif hasattr(ConsoleWriter.handle, "flush"):
            ConsoleWriter.handle.flush()

    def write(self, text):
        """
        Prints text to console window. GUI application will need to attach to
        the calling console, or launch a new console if not available.
        """
        global window
        if not window and win32console:
            # First write: try to obtain a console (attach or allocate)
            if not ConsoleWriter.is_loaded and not ConsoleWriter.handle:
                try:
                    win32console.AttachConsole(-1) # pythonw.exe from console
                    atexit.register(lambda: ConsoleWriter.realwrite("\n"))
                except Exception:
                    pass # Okay if fails: can be python.exe from console
                try:
                    handle = win32console.GetStdHandle(
                                          win32console.STD_OUTPUT_HANDLE)
                    handle.WriteConsole("\n" + text)
                    ConsoleWriter.handle = handle
                    ConsoleWriter.realwrite = handle.WriteConsole
                except Exception: # Fails if GUI program: make new console
                    try: win32console.FreeConsole()
                    except Exception: pass
                    try:
                        win32console.AllocConsole()
                        handle = open("CONOUT$", "w")
                        # Echo the command line at the top of the new console
                        argv = [util.longpath(sys.argv[0])] + sys.argv[1:]
                        handle.write(" ".join(argv) + "\n\n" + text)
                        handle.flush()
                        ConsoleWriter.handle = handle
                        ConsoleWriter.realwrite = handle.write
                        sys.stdin = open("CONIN$", "r")
                        # Keep the freshly created console open until keypress
                        exitfunc = lambda s: (handle.write(s), handle.flush(),
                                              raw_input())
                        atexit.register(exitfunc, "\nPress ENTER to exit.")
                    except Exception:
                        # Could not allocate a console either: fall back
                        # to the original stream
                        try: win32console.FreeConsole()
                        except Exception: pass
                        ConsoleWriter.realwrite = self.stream.write
                ConsoleWriter.is_loaded = True
            else:
                # Console already set up: write through it,
                # falling back to the original stream on error
                try:
                    self.realwrite(text)
                    self.flush()
                except Exception:
                    self.stream.write(text)
        else:
            # GUI window exists or win32console unavailable: plain stream write
            self.stream.write(text)
class ProgressBar(threading.Thread):
    """
    A simple ASCII progress bar with a ticker thread, drawn like
    '[---------\\ 36% ] Progressing text..'.
    """

    def __init__(self, max=100, value=0, min=0, width=30, forechar="-",
                 backchar=" ", foreword="", afterword="", interval=1):
        """
        Creates a new progress bar, without drawing it yet.

        @param   max        progress bar maximum value, 100%
        @param   value      progress bar initial value
        @param   min        progress bar minimum value, for 0%
        @param   width      progress bar width (in characters)
        @param   forechar   character used for filling the progress bar
        @param   backchar   character used for filling the background
        @param   foreword   text in front of progress bar
        @param   afterword  text after progress bar
        @param   interval   ticker thread interval, in seconds
        """
        threading.Thread.__init__(self)
        # Copy all constructor arguments onto the instance as attributes
        for k, v in locals().items():
            if "self" != k: setattr(self, k, v)
        self.daemon = True # Daemon threads do not keep application running
        self.percent = None # Current progress ratio in per cent
        self.value = None # Current progress bar value
        self.bar = "%s[-%s]%s" % (foreword, " " * (self.width - 3), afterword)
        self.printbar = self.bar # Printable text, includes padding to clear
        self.progresschar = itertools.cycle("-\\|/")
        self.is_running = False
        self.update(value, draw=False)

    def update(self, value, draw=True):
        """Updates the progress bar value, and refreshes by default."""
        self.value = min(self.max, max(self.min, value))
        new_percent = int(round(100.0 * self.value / (self.max or 1)))
        w_full = self.width - 2
        w_done = max(1, int(round((new_percent / 100.0) * w_full)))
        # Build bar outline, animate by cycling last char from progress chars
        char_last = self.forechar
        if draw and w_done < w_full: char_last = next(self.progresschar)
        bartext = "%s[%s%s%s]%s" % (
                  self.foreword, self.forechar * (w_done - 1), char_last,
                  self.backchar * (w_full - w_done), self.afterword)
        # Write percentage into the middle of the bar.
        # Floor division: plain "/" would yield a float index under Python 3
        # and make the string slicing below raise TypeError.
        centertxt = " %2d%% " % new_percent
        pos = len(self.foreword) + self.width // 2 - len(centertxt) // 2
        bartext = bartext[:pos] + centertxt + bartext[pos + len(centertxt):]
        self.printbar = bartext + " " * max(0, len(self.bar) - len(bartext))
        self.bar = bartext
        self.percent = new_percent
        if draw: self.draw()

    def draw(self):
        """Prints the progress bar, from the beginning of the current line."""
        output("\r" + self.printbar, end=" ")

    def run(self):
        """Ticker loop: redraws the bar every `interval` seconds until stopped."""
        self.is_running = True
        while self.is_running and time:
            self.update(self.value)
            time.sleep(self.interval)

    def stop(self):
        """Signals the ticker thread to stop running."""
        self.is_running = False
def win32_unicode_argv():
    # @from http://stackoverflow.com/a/846931/145400
    """Returns the program arguments re-read from the Windows wide-character
    command line via the Win32 API, since plain sys.argv mangles non-ANSI
    characters; falls back to sys.argv unchanged if the API reports no
    arguments."""
    result = sys.argv

    from ctypes import POINTER, byref, cdll, c_int, windll
    from ctypes.wintypes import LPCWSTR, LPWSTR

    # GetCommandLineW returns the complete command line as one wide string
    GetCommandLineW = cdll.kernel32.GetCommandLineW
    GetCommandLineW.argtypes = []
    GetCommandLineW.restype = LPCWSTR
    # CommandLineToArgvW splits a wide command line into an argv array
    CommandLineToArgvW = windll.shell32.CommandLineToArgvW
    CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
    CommandLineToArgvW.restype = POINTER(LPWSTR)
    argc = c_int(0)
    argv = CommandLineToArgvW(GetCommandLineW(), byref(argc))
    if argc.value:
        # Remove Python executable and commands if present
        start = argc.value - len(sys.argv)
        # NOTE(review): the wide strings are re-encoded to UTF-8 byte strings
        # here -- confirm callers expect bytes rather than unicode text.
        result = [argv[i].encode("utf-8") for i in range(start, argc.value)]
    return result
def output(*args, **kwargs):
    """Wrapper around print() that exits quietly if the output pipe breaks
    while piping is interrupted, instead of raising an uncatchable error."""
    print(*args, **kwargs)
    try:
        # Flush immediately so a closed pipe surfaces here, where it is catchable
        sys.stdout.flush()
    except IOError as err:
        if err.errno not in (errno.EINVAL, errno.EPIPE):
            raise # Propagate any other errors
        sys.exit() # Stop work in progress if sys.stdout or pipe closed
if "__main__" == __name__:
try: run()
except KeyboardInterrupt: sys.exit()
|
<filename>b2_base/src/b2_logic/nodes/ir_sensors.py
from __future__ import print_function
import rospy
from b2_base.msg import Proximity
class IRSensors:
    """Samples IR distance sensors through an MCP3008-style ADC and publishes
    a Proximity message with one boolean per channel: True when the reading
    indicates an object within the configured proximity distance."""

    def __init__(self, pub_rate, num_adc_channels, vref, min_adc_val,
                 max_adc_val, proximity_dist, center_pub, mcp_obj):
        # pub_rate:         rospy rate object controlling the publish loop
        # num_adc_channels: number of ADC channels to sample each cycle
        # vref:             ADC reference voltage
        # min_adc_val/max_adc_val: ADC reading range, used for volts-per-count
        # proximity_dist:   detection threshold distance in meters
        # center_pub:       publisher the Proximity messages are sent on
        # mcp_obj:          ADC driver (or stub) exposing read_adc(channel)
        self._pub_rate = pub_rate
        self._num_adc_channels = num_adc_channels

        # Calculate the ADC value when an object is in "proximity"
        v_per_adc = volts_per_adc(vref, min_adc_val, max_adc_val)
        self._acd_at_prox_dist = adc_at_proximity_dist(proximity_dist, v_per_adc)
        rospy.logdebug("v_per_adc: {}".format(v_per_adc))
        rospy.logdebug("acd_at_prox_dist: {}".format(self._acd_at_prox_dist))

        self._center_pub = center_pub
        self._mcp = mcp_obj

    def run(self):
        """Main loop: each cycle read every channel, publish one Proximity
        message, then sleep at the configured rate until ROS shuts down."""
        try:
            while not rospy.is_shutdown():
                msg = Proximity()
                for channel in range(self._num_adc_channels):
                    is_proximity = False # No object detected yet

                    # Read sensor values
                    val = self._mcp.read_adc(channel)
                    # Sensor output voltage grows as the object gets closer,
                    # so readings at or above the threshold count as proximity
                    if val >= self._acd_at_prox_dist:
                        is_proximity = True

                    # # Flip for debugging
                    # if self._test_mode:
                    #     self._mcp.set_adc(channel, self._mcp.read_adc(channel) * -1)

                    # Publish sensor messages
                    msg.sensors.append(is_proximity)
                self._center_pub.publish(msg)
                self._pub_rate.sleep()
        except rospy.ROSInterruptException:
            rospy.logwarn("ROSInterruptException received in main loop")
def volts_at_cm_distance(dist_cm):
    """Return the IR sensor's output voltage for an object at dist_cm centimeters.

    The formula is a curve fit (via http://mycurvefit.com) of the
    Voltage/Distance points in the Sharp GP2Y0A60SZXF data sheet:
    https://www.pololu.com/file/0J812/gp2y0a60szxf_e.pdf
    """
    scaled = (dist_cm / 8.798111) ** 1.624654
    return 0.5955366 + 6.8125134 / (1 + scaled)
def adc_at_proximity_dist(prox_dist_m, v_per_adc):
    """Return the integer ADC reading expected for an object sitting exactly
    at the proximity threshold distance.

    prox_dist_m -- threshold distance in meters
    v_per_adc   -- volts represented by a single ADC count
    """
    dist_cm = prox_dist_m * 100
    volts = volts_at_cm_distance(dist_cm)
    return int(volts / v_per_adc)
def volts_per_adc(vref, min_adc_reading, max_adc_reading):
    """Return the voltage represented by one ADC count over the reading span."""
    span = float(max_adc_reading - min_adc_reading)
    return vref / span
class MCP3008Stub:
    """In-memory stand-in for the MCP3008 ADC driver, for tests without hardware."""

    def __init__(self):
        # Eight channels, all starting at zero
        self.channels = [0] * 8

    def read_adc(self, channel):
        """Return the channel's value; the stored value is negated after each
        read so repeated reads alternate sign (test aid)."""
        current = self.channels[channel]
        self.channels[channel] = -current
        return current

    def set_adc(self, channel, val):
        """Store val as the next value to be returned for the channel."""
        self.channels[channel] = val
|
<reponame>scloudic/rabbit-framework
package com.scloudic.rabbitframework.web.test;
import com.scloudic.rabbitframework.web.DataJsonResponse;
public class DataJsonResponseTest {

    /**
     * Manual smoke test: stores one null and one non-null entry in a
     * DataJsonResponse and prints its JSON serialization.
     */
    public static void main(String[] args) {
        DataJsonResponse response = new DataJsonResponse();
        response.setData("test", null);
        response.setData("test1", "test1");
        System.out.println(response.toJson());
    }
}
|
import React, { useState } from 'react';
import PropTypes from 'prop-types';
import { useDispatch, useSelector } from 'react-redux';
import { setCharacter, setCharacters } from 'store/actions';
import Character from 'types/Character';
import { Form } from './styles';
// Props: the character being edited plus a callback invoked after a save.
interface CharacterFormProps {
  character: Character;
  onSubmit(): void;
}

// Shape of the Redux store slice read by this component.
interface RootState {
  characters: Character[];
}

/**
 * Form for editing a character's name and description.
 * On submit it dispatches both the updated single character and the updated
 * character list to the store, then notifies the parent via onSubmit.
 */
const CharacterForm: React.FC<CharacterFormProps> = ({
  character,
  onSubmit,
}) => {
  const dispatch = useDispatch();
  const characters = useSelector((state: RootState) => state.characters);
  // Local form state, seeded from the character being edited.
  const [name, setName] = useState(character.name);
  const [description, setDescription] = useState(character.description || '');

  function handleSubmit(e: React.FormEvent<HTMLFormElement>) {
    e.preventDefault();

    const updatedCharacter = { ...character, name, description };
    dispatch(setCharacter(updatedCharacter));

    // Replace the edited character in the list, leaving others untouched.
    const updatedCharacters = characters.map((char) =>
      char.id === updatedCharacter.id ? updatedCharacter : char
    );
    dispatch(setCharacters(updatedCharacters));

    onSubmit();
  }

  return (
    <Form onSubmit={handleSubmit}>
      <span>Name: </span>
      <input value={name} onChange={(e) => setName(e.target.value)} />
      <span>Description: </span>
      <textarea
        value={description}
        onChange={(e) => setDescription(e.target.value)}
      />
      <button type="submit">Save</button>
    </Form>
  );
};

// Runtime prop validation mirroring CharacterFormProps and the Character type.
CharacterForm.propTypes = {
  character: PropTypes.shape({
    id: PropTypes.string.isRequired,
    name: PropTypes.string.isRequired,
    description: PropTypes.string,
    thumbnail: PropTypes.string.isRequired,
  }).isRequired,
  onSubmit: PropTypes.func.isRequired,
};

export default CharacterForm;
|
<reponame>tcoenraad/functioneel-programmeren-uitwerkingen
module Exercise where
-- recursive
-- Accepts a sequence when the absolute differences of neighbouring elements
-- are exactly (n-1), (n-2), ..., 1, in that order.
jollyJumper :: [Int] -> Bool
jollyJumper n = jollyJumper' ((length n) - 1) n

-- Worker: 'size' is the difference required between the next two elements.
-- NOTE(review): there is no equation for the empty list, so jollyJumper []
-- raises a non-exhaustive-pattern error -- confirm [] is outside the domain.
jollyJumper' :: Int -> [Int] -> Bool
jollyJumper' _ [_] = True
jollyJumper' size (x:y:xs) | abs (x - y) == size = jollyJumper' (size - 1) (y:xs)
                           | otherwise = False
-- higher-order
-- Accepts a sequence when all second differences (differences of the
-- neighbour differences) are mutually equal.
-- NOTE(review): this is not equivalent to jollyJumper above -- it only
-- requires the second differences to be equal to each other, not to 1, so
-- e.g. a constant list passes here but not there; confirm which is intended.
jollyJump :: [Int] -> Bool
jollyJump xs = and $ zipWith (==) diff (tail diff) where
    diff = difference $ difference xs

-- Absolute differences of neighbouring elements; the result is one element
-- shorter than the input. No equation for [], so 'difference []' errors.
difference :: [Int] -> [Int]
difference [_] = []
difference (x:y:xs) = abs(x - y):difference(y:xs)
|
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Scanner;
import java.util.TreeSet;
/**
 * Solver for a card-grid puzzle: given an n x m grid of playing cards that
 * may contain jokers J1 and J2, replace each joker with a card not already
 * in the grid so that two non-overlapping 3x3 squares of "good" cards exist.
 * A square is good when all nine cards share a suit, or all nine ranks are
 * pairwise distinct.
 *
 * Cards are encoded as (100 * rank + suit): rank = code / 100 (2..14),
 * suit = code % 100 (0..3). Jokers are encoded as 51 and 52.
 */
public class D {
    private static HashMap<Character, Integer> fac; // rank char ('2'..'9','T','J','Q','K','A') -> 2..14
    private static HashMap<Character, Integer> mst; // suit char ('C','D','H','S') -> 0..3
    private static TreeSet<Integer> cardsLeft; // encoded cards not present in the grid
    private static int n; // grid height
    private static int m; // grid width

    /**
     * Returns true when the 3x3 square with top-left corner (fx, fy) is good:
     * either every card has the same suit, or no two cards share a rank.
     */
    private static boolean check(int[][] cards, int fx, int fy) {
        // First test: every card's suit matches the corner card's suit.
        boolean good = true;
        for (int i = fx; i < fx + 3; ++i) {
            for (int j = fy; j < fy + 3; ++j) {
                if (cards[i][j] % 100 != cards[fx][fy] % 100) {
                    good = false;
                    break;
                }
            }
            if (!good) {
                break;
            }
        }
        if (good) {
            return true;
        }
        // Second test: no two distinct cells in the square may share a rank.
        for (int i = fx; i < fx + 3; ++i) {
            for (int j = fy; j < fy + 3; ++j) {
                for (int k = fx; k < fx + 3; ++k) {
                    for (int l = fy; l < fy + 3; ++l) {
                        if (i == k && j == l) {
                            continue;
                        }
                        if (cards[i][j] / 100 == cards[k][l] / 100) {
                            return false;
                        }
                    }
                }
            }
        }
        return true;
    }

    /**
     * Converts an encoded card back to its two-character text form (rank
     * letter then suit letter) by reverse lookup in the fac and mst tables.
     * (The parameter shadows the grid-height field of the same name.)
     */
    private static String getCard(int n) {
        String res = "";
        for (int i: fac.keySet()) {
            if (n / 100 == fac.get((char)i)) {
                res += (char)i;
                break;
            }
        }
        for (int i: mst.keySet()) {
            if (n % 100 == mst.get((char)i)) {
                res += (char)i;
                break;
            }
        }
        return res;
    }

    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        PrintWriter out = new PrintWriter(System.out);
        n = in.nextInt();
        m = in.nextInt();
        int[][] cards = new int[n][m];
        // Joker positions; -1 while the joker has not been seen in the grid.
        int j1x = -1, j1y = -1, j2x = -1, j2y = -1;
        // Build the suit and rank lookup tables.
        mst = new HashMap<Character, Integer>();
        mst.put('C', 0);
        mst.put('D', 1);
        mst.put('H', 2);
        mst.put('S', 3);
        fac = new HashMap<Character, Integer>();
        String temp = "23456789";
        for (int i = 0; i < temp.length(); ++i) {
            fac.put(temp.charAt(i), i + 2);
        }
        fac.put('T', 10);
        fac.put('J', 11);
        fac.put('Q', 12);
        fac.put('K', 13);
        fac.put('A', 14);
        // Start from the full 52-card pack; cards seen in the grid are removed.
        cardsLeft = new TreeSet<Integer>();
        for (int i = 2; i <= 14; ++i) {
            for (int j = 0; j <= 3; ++j) {
                cardsLeft.add(100 * i + j);
            }
        }
        // Read the grid; "J1"/"J2" (second char '1'/'2') mark the jokers.
        for (int i = 0; i < n; ++i) {
            for (int j = 0; j < m; ++j) {
                String s = in.next();
                if (s.charAt(1) == '1') {
                    j1x = i;
                    j1y = j;
                    cards[i][j] = 51;
                } else if (s.charAt(1) == '2') {
                    j2x = i;
                    j2y = j;
                    cards[i][j] = 52;
                } else {
                    cards[i][j] = 100 * fac.get(s.charAt(0)) + mst.get(s.charAt(1));
                    cardsLeft.remove(cards[i][j]);
                }
            }
        }
        // Good 3x3 top-left corners, each encoded as 100 * row + column.
        ArrayList<Integer> good = new ArrayList<Integer>();
        if (j1x == -1 && j2x == -1) {
            // Case 1: no jokers -- just look for two good, non-overlapping squares.
            for (int i = 0; i < n - 2; ++i) {
                for (int j = 0; j < m - 2; ++j) {
                    if (check(cards, i, j)) {
                        good.add(100 * i + j);
                    }
                }
            }
            boolean found = false;
            for (int i = 0; i < good.size(); ++i) {
                for (int j = 0; j < good.size(); ++j) {
                    if (i == j) {
                        continue;
                    }
                    // Squares overlap unless corners differ by more than 2 in rows or columns.
                    if (Math.abs(good.get(i) % 100 - good.get(j) % 100) > 2 || Math.abs(good.get(i) / 100 - good.get(j) / 100) > 2) {
                        found = true;
                        out.println("Solution exists.");
                        out.println("There are no jokers.");
                        out.println("Put the first square to (" + (good.get(i) / 100 + 1) + ", " + (good.get(i) % 100 + 1) + ").");
                        out.println("Put the second square to (" + (good.get(j) / 100 + 1) + ", " + (good.get(j) % 100 + 1) + ").");
                        break;
                    }
                }
                if (found) {
                    break;
                }
            }
            if (!found) {
                out.println("No solution.");
            }
        } else if (j1x == -1) {
            // Case 2: only J2 present -- try every unused card as its replacement.
            boolean found = false;
            for (int extra: cardsLeft) {
                cards[j2x][j2y] = extra;
                for (int i = 0; i < n - 2; ++i) {
                    for (int j = 0; j < m - 2; ++j) {
                        if (check(cards, i, j)) {
                            good.add(100 * i + j);
                        }
                    }
                }
                for (int i = 0; i < good.size(); ++i) {
                    for (int j = 0; j < good.size(); ++j) {
                        if (i == j) {
                            continue;
                        }
                        if (Math.abs(good.get(i) % 100 - good.get(j) % 100) > 2 || Math.abs(good.get(i) / 100 - good.get(j) / 100) > 2) {
                            found = true;
                            out.println("Solution exists.");
                            out.println("Replace J2 with " + getCard(extra) + ".");
                            out.println("Put the first square to (" + (good.get(i) / 100 + 1) + ", " + (good.get(i) % 100 + 1) + ").");
                            out.println("Put the second square to (" + (good.get(j) / 100 + 1) + ", " + (good.get(j) % 100 + 1) + ").");
                            break;
                        }
                    }
                    if (found) {
                        break;
                    }
                }
                if (found) {
                    break;
                }
                good.clear();
            }
            if (!found) {
                out.println("No solution.");
            }
        } else if (j2x == -1) {
            // Case 3: only J1 present -- mirror of case 2.
            boolean found = false;
            for (int extra: cardsLeft) {
                cards[j1x][j1y] = extra;
                for (int i = 0; i < n - 2; ++i) {
                    for (int j = 0; j < m - 2; ++j) {
                        if (check(cards, i, j)) {
                            good.add(100 * i + j);
                        }
                    }
                }
                for (int i = 0; i < good.size(); ++i) {
                    for (int j = 0; j < good.size(); ++j) {
                        if (i == j) {
                            continue;
                        }
                        if (Math.abs(good.get(i) % 100 - good.get(j) % 100) > 2 || Math.abs(good.get(i) / 100 - good.get(j) / 100) > 2) {
                            found = true;
                            out.println("Solution exists.");
                            out.println("Replace J1 with " + getCard(extra) + ".");
                            out.println("Put the first square to (" + (good.get(i) / 100 + 1) + ", " + (good.get(i) % 100 + 1) + ").");
                            out.println("Put the second square to (" + (good.get(j) / 100 + 1) + ", " + (good.get(j) % 100 + 1) + ").");
                            break;
                        }
                    }
                    if (found) {
                        break;
                    }
                }
                if (found) {
                    break;
                }
                good.clear();
            }
            if (!found) {
                out.println("No solution.");
            }
        } else {
            // Case 4: both jokers present -- try every ordered pair of
            // distinct unused cards as replacements.
            int[] cardsleft = new int[cardsLeft.size()];
            int pos = 0;
            for (int i: cardsLeft) {
                cardsleft[pos++] = i;
            }
            boolean found = false;
            for (int ii = 0; ii < cardsleft.length; ++ii) {
                for (int jj = 0; jj < cardsleft.length; ++jj) {
                    if (ii == jj) {
                        continue;
                    }
                    cards[j1x][j1y] = cardsleft[ii];
                    cards[j2x][j2y] = cardsleft[jj];
                    for (int i = 0; i < n - 2; ++i) {
                        for (int j = 0; j < m - 2; ++j) {
                            if (check(cards, i, j)) {
                                good.add(100 * i + j);
                            }
                        }
                    }
                    for (int i = 0; i < good.size(); ++i) {
                        for (int j = 0; j < good.size(); ++j) {
                            if (i == j) {
                                continue;
                            }
                            if (Math.abs(good.get(i) % 100 - good.get(j) % 100) > 2 || Math.abs(good.get(i) / 100 - good.get(j) / 100) > 2) {
                                found = true;
                                out.println("Solution exists.");
                                out.println("Replace J1 with " + getCard(cardsleft[ii]) + " and J2 with " + getCard(cardsleft[jj]) + ".");
                                out.println("Put the first square to (" + (good.get(i) / 100 + 1) + ", " + (good.get(i) % 100 + 1) + ").");
                                out.println("Put the second square to (" + (good.get(j) / 100 + 1) + ", " + (good.get(j) % 100 + 1) + ").");
                                break;
                            }
                        }
                        if (found) {
                            break;
                        }
                    }
                    if (found) {
                        break;
                    }
                    good.clear();
                }
                if (found) {
                    break;
                }
            }
            if (!found) {
                out.println("No solution.");
            }
        }
        in.close();
        out.close();
    }
}
|
// tries to reset the internal status of the LP solver in order to ignore an
// instability of the last solving call
SCIP_RETCODE SCIPlpiIgnoreInstability(SCIP_LPI* lpi, // LP interface structure
                                      SCIP_Bool* success // output: whether the instability was ignored (never written here)
) {
  // NOTE(review): LOG(FATAL) aborts the process, so the return statement is
  // unreachable and *success is never assigned -- this interface apparently
  // does not support ignoring instability; confirm no caller relies on it.
  SCOPED_TIME_STAT(lpi->stats);
  LOG(FATAL) << "calling SCIPlpiIgnoreInstability";
  return SCIP_OKAY;
}
|
def logout_all_flow(db: Session, token: "Token") -> None:
_consume_refresh_token(db=db, token=token)
db.query(RefreshToken).filter(RefreshToken.user_id == token.payload.sub).delete(synchronize_session=False)
db.commit()
|
First we got Martian Manhunter, then we got to see a legionnaire ring, then the Flash crossover, and this week we may have just gotten our first clue for the arrival of a very beloved character in the DC mythos.
Superboy.
Project Cadmus, created by the brilliant Jack Kirby, has a rich history in the DC universe and one that has been interpreted many times. Most recently, fans of the (cancelled too soon) series Young Justice were introduced to the infamous facility during the explosive premiere back in 2011.
Cadmus does in fact serve as the (original) origin for Connor Kent who was created in the lab as an attempt to create another Superman. Young Justice fans might remember that Superboy was created by using the DNA of Lex Luthor and Superman. This makes for a cool origin that sets up an intriguing arc for character development, but it is not the original telling of Superboy’s tale.
Superboy was created by Cadmus not by using DNA of Superman but rather altering a regular human clone to have the abilities of the Kryptonian. This means that the series does not have to jump the hurdle of Lex and Clark. They can maintain Connor’s original origin and integrate him into the Supergirl world fairly easily.
Are we concerned that Superboy might outshine Kara? I’m not. I think the wise thing to do would be to keep Connor as a teenager as he is meant to be. The angst and the recklessness paired with Kara’s energy I think would be interesting and complimentary rather than overpowering.
From episode 1×17 we know that they’ve kept Jeremiah Danvers alive all of these years… Why? For those of you that weren’t alive in the 90’s, the actor playing Jeremiah once played the Man of Steel in a series called Lois and Clark. So, if they do stick to the original story of Superboy and have him be an altered human clone, why not have it be Jeremiah’s? It would be a fun twist. They have altered Wally West to be the brother of Iris instead of the nephew. I can see Superboy still go by Connor Kent but be related to Alex and Jeremiah.
Or, the best we’re going to get is the introduction of Guardian and this was all just a dream gone wild.
Time will tell
Advertisements
|
SAN DIEGO — Two San Diego congressmen want to know why federal agents raided an Oceanside gun parts store last weekend and what the government plans to do with the computers and customer lists it confiscated.
Rep. Duncan Hunter (R-Alpine) sent a letter to the director of the Bureau of Alcohol, Tobacco, Firearms and Explosives Friday asking for information about the raid on Ares Armor last Saturday. The store owner had been granted an injunction by a judge to prevent the raid, but BATFE agents served a search warrant on the business anyway.
READ HUNTER’S LETTER TO THE BATFE
In a letter to BATFE Director Todd Jones, Issa asked
why customer information unrelated to the investigation was collected,
what BATFE intends to do with the data,
how BATFE will ensure that the customer data remains confidential.
Another member of the San Diego congressional delegation, Rep. Darrell Issa (R-Vista) has also expressed concerns over the raid. Issa said he wants BATFE officials to explain what the investigation is about.
“It’s concerning when you have the ATF disregard an injunction and get another judge to sign a warrant, so I want to see what happened,” Issa told Fox 5 Thursday. “Tomorrow members of my staff will be sitting down with officials to see why they raided that shop, and I’m deeply concerned about the collection of customer lists who have nothing to do with the controversy.”
|
/// Returns true if the rectangle is contained within the outline of this path. This function is
/// an approximation, and can return false even if the rectangle is contained within this path's
/// outline. However, it will never return true when the rectangle is _not_ contained within this
/// path's outline.
pub fn conservatively_contains_rect(&self, rect: impl AsRef<Rect>) -> bool {
    // Delegates to the native conservativelyContainsRect through the FFI
    // bindings, converting both receiver and rect to their native handles.
    unsafe {
        self.native()
            .conservativelyContainsRect(rect.as_ref().native())
    }
}
|
// AddToSlackDemand adds resources to the slack demand
// for the resource pool.
// It always returns nil; the error result appears to exist only to satisfy
// the interface -- TODO confirm.
func (n *resPool) AddToSlackDemand(res *scalar.Resources) error {
	n.Lock()
	defer n.Unlock()

	// Accumulate into the pool-wide slack demand while holding the pool lock.
	n.slackDemand = n.slackDemand.Add(res)

	log.WithFields(log.Fields{
		"respool_id": n.ID(),
		"demand":     n.slackDemand,
	}).Debug("Current Demand after Adding resources")
	return nil
}
|
package me.bymartrixx.ragnarok.server.mixin.item;
import me.bymartrixx.ragnarok.server.RagnarokMod;
import me.bymartrixx.ragnarok.server.RagnarokOptions;
import net.minecraft.enchantment.EnchantmentHelper;
import net.minecraft.entity.LivingEntity;
import net.minecraft.item.ItemStack;
import net.minecraft.item.TridentItem;
import net.minecraft.world.World;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.injection.At;
import org.spongepowered.asm.mixin.injection.Redirect;
/**
 * Mixin to {@link TridentItem}.
 *
 * Intercepts the riptide enchantment lookup during trident release so that a
 * configurable percentage of riptide launches can fail.
 */
@Mixin(TridentItem.class)
public class TridentItemMixin {

    /**
     * Replaces the {@code EnchantmentHelper.getRiptide(ItemStack)} call inside
     * {@code onStoppedUsing}: with probability {@code RagnarokOptions.riptideFail}
     * percent it returns -1 instead of the real riptide level.
     */
    @Redirect(at = @At(
            value = "INVOKE",
            target = "Lnet/minecraft/enchantment/EnchantmentHelper;getRiptide(Lnet/minecraft/item/ItemStack;)I"),
            method = "onStoppedUsing")
    private int getRiptideLevel(ItemStack stack, ItemStack stack2, World world, LivingEntity user,
                                int remainingUseTicks) {
        if (RagnarokOptions.riptideFail > 0) {
            // nextInt(100) is uniform on [0, 99], so this branch fires with
            // probability riptideFail percent.
            if (user.getRandom().nextInt(100) <= RagnarokOptions.riptideFail - 1) {
                RagnarokMod.debug("Riptide failed");
                return -1;
            }
        }
        return EnchantmentHelper.getRiptide(stack);
    }
}
|
<filename>library/hwlib-doxygen.hpp
// ==========================================================================
//
// File : hwlib-doxygen.hpp
// Part of : C++ hwlib library for close-to-the-hardware OO programming
// Copyright : <EMAIL> 2017
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// ==========================================================================
// this file contains Doxygen lines (of course, that is its only purpose)
/// @file
// This file contains doxygen stuff that doesn't belong in any specific
// header file. Hence this file is not included by hwlib-all.hpp.
/// \brief
/// hwlib library
/// \details
/// This namespace contains the target-independent
/// parts of the hwlib library.
namespace hwlib {}
/// \mainpage
///
/// \image html toolset.png
///
/// \author <NAME> (<EMAIL>)
/// \version 1.1 (last modified 2017-02-15)
/// \copyright boost license (but see below for exceptions)
///
/// -------------------------------------------------------------------------
///
/// Hwlib is a C++ OO library for close-to-the-hardware programming.
/// It is used in an number of courses at the Hogeschool Utrecht.
/// The library is meant to be used with bmptk.
/// The language used is the 2014 C++ standard.
///
/// Hardware pins and ports, and other hardware-related interfaces like
/// A/D converters and character streams are represented by abstract
/// interfaces (classes with virtual functions).
///
/// -------------------------------------------------------------------------
///
/// All hwlib files are provided under the boost license, except:
/// - some pictures used in the documentation are under a
/// Creative Commons license that allows unaltered reproduction
/// - an arduino-due include file is from Atmel (Microchip) under asl
///
/// These exceptions have no impact on the status of an application
/// that includes hwlib in its build.
///
// ==========================================================================
//
/// \page use Use
///
/// Implementations of the hardware abstractions (like pins and delays)
/// are provided for the supported targets:
/// - Arduino Due (ATSAM3X8E chip)
/// - DB103 (LPC1114 chip)
/// - Arduino Uno (ATMEGA328P chip)
///
/// The easy way is to use the library with bmptk.
/// Include hwlib.hpp, which will include the
/// correct target header based on the TARGET that is set in the
/// bmptk makefile. Bmptk passes this setting to the hwlib.hpp file
/// via a macro definition on the compiler command line.
///
/// \snippet "demo\arduino-due\blink\main.cpp" [blink example]
/// \snippet "demo\arduino-due\makefile.link" [makefile example]
///
/// Another way to use the library is by directly including the header
/// for the target (hwlib-arduino-due.hpp, ...), and using some other
/// build system.
///
/// \snippet "demo\arduino-due\blink-direct-include\main.cpp" [blink example]
///
///
// ==========================================================================
//
/// \page buffering Buffering
///
/// For efficiency, some operations are provided in two ways: direct
/// and (potentially) buffered. A direct operation will have an
/// immediate external effect (for output operations) or work on a
/// fresh sample of the external conditions (for input operations).
/// A buffered operation might have its effect delayed up to the
/// next flush operation (output), or work on input that is as old
/// as the most recent refresh() oparation.
///
/// The easy way is to always use the unbuffered operations.
/// Drawing a drawable is by default unbuffered.
///
/// \snippet "demo\arduino-due\oled-timing\main.cpp" [direct]
///
/// Using buffered operations can produce a significant speedup
/// because the actual output operation is postponed, and can
/// handle all changes in one go.
///
/// \snippet "demo\arduino-due\oled-timing\main.cpp" [buffered]
///
/// For the OLED the difference can be very large.
/// Writing one pixel requires up to three operations:
/// - optionally set X address (7-byte I2C transaction)
/// - optionally set Y address (7-byte I2C transaction)
/// - write data byte (3-byte I2C transaction)
/// For a direct operation this is done for each and every pixel.
///
/// The buffered graphics operations write to the in-memory pixel buffer,
/// which is written out to the oled in one go by the flush operation.
/// This is done in one I2C transaction, with some small overhead and
/// 1024 data bytes. Hence a flush takes roughly the time it takes to
/// write 60 pixels. For the SPI interface the single pixel write
/// overhead is a little bit smaller because a SPI transfer has no command
/// byte (hence each transfer takes 1 byte less).
///
/// Character output to a graphic window is always buffered. The
/// flush manipulator is required to write the pixels to the screen.
///
/// \snippet "demo\arduino-due\oled-hello\main.cpp" [flush]
///
// ==========================================================================
//
/// \page naming Naming conventions
///
/// The following naming conventions are used:
/// - functions that are called set() and get() (or have set or get as
/// part of their name) deal with entities or effects that are
/// memoryless (behave like a variable): calling set() twice with
/// the same value has the same effect as calling it once;
/// calling get() twice should (when the context has not changed)
/// return the same value.
/// Examples are
/// - digital and analog I/O pins
/// - pixels in a graphic window
/// - the value in a pool (synchronization mechanism)
/// - functions that are called read() and write() (or have those terms
/// as part of their names) deal with entities or effects that have
/// memory or an otherwise lasting effect. Examples are:
/// - character (and other) streams
/// - channels (queue-like synchronization mechanism)
///
// ==========================================================================
//
/// \page rationale Rationale
///
/// Hwlib makes it possible to program micro-controllers in classic Object
/// Oriented style (using objects, inhertitance, virtuals, etc).
/// It provides abstract interfaces to the basic low-level things found
/// in a micro-controller like pins, ports and A/D interfaces, and
/// implementations of these things for a few targets.
///
/// Hwlib is used in a number of C++ courses
/// - as library for the examples
/// - as library for homework
/// - style example for hardware OO interfacing
///
/// Hwlib is meant to be usable and understandable by users with (only)
/// a basic knowledge of C++, specifically:
/// - basic C: types, expressions, control, functions, pointers,
/// declaration versus definition, use of header files
/// - char, short, int, long versus uintN_t, uint_fastN_t
/// - struct, class, public, private, protected, static
/// - constructors (incl. delegation), destructors
/// - inheritance, virtual, abstract interface, override, final
/// - const, constexpr
/// - static_cast<>
/// - references, object lifetime (danger of dangling references)
/// - for(:) used with arrays
/// - the effect of __attribute__((weak))
/// - use of << and >> for output and input
///
/// The following design patterns are used (extensively) within hwlib:
/// - adapter, decorator, composite, proxy
/// (grouped because these are variations of the same basic idea)
/// - non-virtual interface (NVI)
/// - dependency injection
/// - proxy
///
/// The following C++ features are deliberately *not* used,
/// because they are either too advanced,
/// or not appropriate for use on small micro-controllers:
/// - dynamic memory (new, delete, STL containers)
/// - exception handling (throw, try ... catch)
/// - templates (except static_cast<>, and string<N>)
/// - RTTI, dynamic_cast
///
/// Hwlib must be effective on micro-controllers with different word sizes.
/// Hence plain int types are (almost) never used,
/// instead the likes of uint_fast8_t are used.
///
/// Hwlib is *not* meant to be the most effective (fast, compact) library
/// for close-to-the-hardware programming, because that requires concepts
/// (templates, and a lot of SFINAE) that are not appropriate for
/// (relatively) new C++ programmers.
/// My hwcpp library is an (ongoing) attempt to fill that niche.
///
// ==========================================================================
//
/// \page acknowledgements Acknowledgements
///
/// This documentation is provided under the same (boost) license
/// as the library, except for some of the pictures:
///
///--------------------------------------------------------------------------
///
/// \image html toolset.png
/// Creative Commons Attribution 3.0
///
///--------------------------------------------------------------------------
///
/// \image html uno-pinout.png
/// Creative Commons, details in the image itself
///
///--------------------------------------------------------------------------
///
/// \image html due-pinout.png
/// Creative Commons, details in the image itself
///
///--------------------------------------------------------------------------
///
/// \image html mini-micro-pinout.png
/// Creative Commons, details in the image itself
///
///--------------------------------------------------------------------------
///
/// \image html nano-pinout.png
/// Creative Commons, details in the image itself
///
///--------------------------------------------------------------------------
///
/// A source for IC pinouts:
/// https://commons.wikimedia.org/wiki/Category:IC_pinout_diagrams
// ==========================================================================
//
|
//
// Created by <NAME> on 2018/11/12.
//
#ifndef HURRICANE_HURRICANEARMSYSTEM_H
#define HURRICANE_HURRICANEARMSYSTEM_H
#include "CAHRR/src/PIDAccumulator.h"
#include "CAHRR/src/RampAccumulator.h"
#include "CAHRR/src/RotateAccumulator.h"
#include "CAHRR/src/DeltaAccumulator.h"
#include "CAHRR/src/AvgAccumulator.h"
#include "OI.h"
#include "hal.h"
// Hardware IDs of the three arm actuators (presumably motor/CAN controller
// IDs — TODO confirm against the robot's wiring map).
const int ARM_BOTTOM_ID = 5;
const int ARM_TOP_ID = 6;
const int ARM_BASE_ID = 7;
// Forward declarations of the per-joint controller classes (defined
// elsewhere) so this header does not need their full definitions.
class ArmBottom;
class ArmTop;
class ArmBase;
// Coordinates the robot arm joints (bottom/top/base) as one subsystem.
// Expected lifecycle: construct, initialize(), call update() periodically,
// destroy() on shutdown — TODO confirm call order against OI/HAL usage.
class HurricaneArmSystem {
private:
double position; // most recently requested arm position (set via setPosition) — TODO confirm units
ArmBottom* arm_bottom; // bottom-joint controller; ownership/lifetime not visible here — TODO confirm
// TODO: check if this flag is needed
bool data_available_bottom, data_available_base, data_available_top; // per-joint "new data" flags
public:
bool disabled = false; // todo: remove
HurricaneArmSystem();
bool initialize(); // one-time setup; bool return presumably signals success — TODO confirm convention
bool update(); // periodic tick of the subsystem
bool destroy(); // teardown counterpart of initialize()
bool setPosition(double position); // request a new arm position
void data(); // NOTE(review): purpose not visible from this header — likely telemetry/debug output; confirm
};
#endif //HURRICANE_HURRICANEARMSYSTEM_H
|
Antitachycardia pacing for termination of ventricular tachyarrhythmias: should we use it?
This editorial refers to ‘Clinical course and prognostic relevance of antitachycardia pacing-terminated ventricular tachyarrhythmias in implantable cardioverter-defibrillator patients’ by T. Kleemann et al ., doi:10.1093/europace/euv007.
The first implantable cardioverter-defibrillator (ICD) device was developed by Drs Michel Mirowski and Morty Mower, and successfully implanted in a human being at Johns Hopkins Hospital in February 1980.1 The device only had the capability of defibrillation, required thoracotomy, and due to the size and weight of the device (225 g), the battery had to be implanted abdominally. Since then, major advances in the design, implantation technique, and capabilities of the device have been implemented, including transvenous lead implantation, subcutaneous device placement, and the addition of overdrive pacing, also known as antitachycardia pacing (ATP), for termination of ventricular tachycardia.2
Today, through numerous randomized trials, the survival benefit of both primary and secondary prevention ICD implantation has irrevocably been established.3,4 However, even though the device provides a clear survival benefit, there is evidence that unnecessary ICD therapy might contribute to increased mortality and morbidity.5–8 For appropriate ICD therapy, this is mainly due to the deleterious effects of the malignant rhythm on the myocardium, but some studies also suggest that the shock itself, may contribute to myocardial damage and hence worse prognosis irrespective of the underlying rhythm.7,8 Furthermore, from a patient perspective, there are psychological issues as well as evidence of reduced quality-of-life in relation to ICD shocks that need to be taken …
|
package orm
// EventType classifies a CounterEvent record.
type EventType uint8

const (
	TICK     EventType = 1 + iota // Tick of counter
	ABS_CORR                      // Absolute correction of reading
	ABS_READ                      // Absolute reading
	LAST                          // Automatically appended event with current time
)

// CounterEvent is one persisted change of a counter's reading.
// Delta and Reading are fixed-point values multiplied by 1000.
type CounterEvent struct {
	ID        uint   `gorm:"primary_key"` // primary key
	CounterID uint   `sql:"index"`        // foreign key of the counter
	// BUG FIX: the tag was previously unterminated (`sql:"index`), which is
	// not a compile error but makes reflect.StructTag.Get("sql") return "",
	// so the index on Timestamp was silently never created.
	Timestamp uint64 `sql:"index"` // timestamp of the event
	EventType EventType            // see the EventType constants above
	Delta     int64                // delta multiplied by 1000
	Reading   uint64               // reading multiplied by 1000
}
// NewLastCounterEvent builds the synthetic LAST marker for a counter:
// a zero-delta event stamped with the current time that carries the
// counter's current reading unchanged.
func NewLastCounterEvent(c Counter) CounterEvent {
	ev := CounterEvent{
		CounterID: uint(c.ID),
		EventType: LAST,
		Timestamp: GetNow(),
	}
	ev.Delta = 0
	ev.Reading = c.Reading
	return ev
}
// NewTickCounterEvent records a single tick of the counter: the reading
// advances by the counter's configured TickAmount, and the delta is that
// same amount (all values fixed-point, ×1000).
func NewTickCounterEvent(c Counter) CounterEvent {
	step := c.TickAmount
	return CounterEvent{
		CounterID: uint(c.ID),
		EventType: TICK,
		Timestamp: GetNow(),
		Delta:     int64(step),
		Reading:   c.Reading + step,
	}
}
// NewAbsCorrCounterEvent records an absolute correction of the reading by a
// signed delta (fixed-point, ×1000 like the stored reading). The corrected
// reading is computed in signed space and stored back as unsigned.
func NewAbsCorrCounterEvent(c Counter, delta int64) CounterEvent {
	corrected := uint64(int64(c.Reading) + delta)
	return CounterEvent{
		CounterID: uint(c.ID),
		EventType: ABS_CORR,
		Timestamp: GetNow(),
		Delta:     delta,
		Reading:   corrected,
	}
}
|
/**
* Created by pgarcia on 29/6/17.
*/
/**
 * Pager adapter that presents the weather items as a pseudo-infinite
 * carousel: the item list is repeated {@code loops} times so the user can
 * scroll "forever", and the page at the exact middle is rendered enlarged.
 * Also serves as the {@link ViewPager.PageTransformer} that shrinks pages
 * linearly as they move away from the centre.
 */
public class CarouselUpIntroAdapter extends FragmentPagerAdapter implements ViewPager.PageTransformer {
    private final static float BIG_SCALE = 1.0f;
    private final static float SMALL_SCALE = 0.7f;
    private final static float DIFF_SCALE = BIG_SCALE - SMALL_SCALE;

    /** Number of times the element list is repeated to fake infinite paging. */
    public int loops;

    private Context context;
    private List<OpenWeatherResponseBean> elements;
    // Scale chosen for the most recently created page fragment.
    private float scale;

    // NOTE: the previous revision declared unused fields `cur`, `next` and
    // stored the FragmentManager redundantly (the superclass already keeps
    // it); that dead state has been removed.
    public CarouselUpIntroAdapter(Context context, FragmentManager fragmentManager, List<OpenWeatherResponseBean> elements) {
        super(fragmentManager);
        this.context = context;
        this.elements = elements;
        // With fewer than two items there is nothing to scroll through, so
        // do not inflate the page count.
        if (this.elements.size() < 2) {
            loops = 1;
        } else {
            loops = 1000;
        }
    }

    @Override
    public Fragment getItem(int position) {
        // The pager starts in the middle of the repeated range; only that
        // initial page is created at full size.
        int first = (elements.size() * loops) / 2;
        scale = (position == first) ? BIG_SCALE : SMALL_SCALE;
        position = position % elements.size();
        return ItemCarouselUpFragment.newInstance(context, position, scale, this.elements.get(position));
    }

    @Override
    public int getCount() {
        return elements.size() * loops;
    }

    @Override
    public void transformPage(View page, float position) {
        CarouselLayout carouselLayout = (CarouselLayout) page.findViewById(R.id.carouselLayout);
        // position is 0 for the centre page and +/-1 for its neighbours;
        // shrink symmetrically with distance from the centre, clamped at 0.
        float scale = BIG_SCALE - Math.abs(position) * DIFF_SCALE;
        if (scale < 0) scale = 0;
        carouselLayout.setScaleBoth(scale);
    }
}
|
import Data.Char
-- Read the question, strip all whitespace, and print the verdict.
main :: IO ()
main = do
  line <- getLine
  putStrLn (solve (concat (words line)))
-- Answer "YES" iff the character just before the final one (i.e. the last
-- letter before the trailing '?') is a vowel, counting Y as a vowel;
-- comparison is case-insensitive.
solve :: String -> String
solve s
  | isVowel (last (init s)) = "YES"
  | otherwise               = "NO"
  where
    isVowel c = toUpper c `elem` "AEIOUY"
|
Asymmetric Cell Division and Notch Signaling Specify Dopaminergic Neurons in Drosophila
In Drosophila, dopaminergic (DA) neurons can be found from mid embryonic stages of development till adulthood. Despite their functional involvement in learning and memory, not much is known about the developmental as well as molecular mechanisms involved in the events of DA neuronal specification, differentiation and maturation. In this report we demonstrate that most larval DA neurons are generated during embryonic development. Furthermore, we show that loss of function (l-o-f) mutations of genes of the apical complex proteins in the asymmetric cell division (ACD) machinery, such as inscuteable and bazooka result in supernumerary DA neurons, whereas l-o-f mutations of genes of the basal complex proteins such as numb result in loss or reduction of DA neurons. In addition, when Notch signaling is reduced or abolished, additional DA neurons are formed and conversely, when Notch signaling is activated, less DA neurons are generated. Our data demonstrate that both ACD and Notch signaling are crucial mechanisms for DA neuronal specification. We propose a model in which ACD results in differential Notch activation in direct siblings and in this context Notch acts as a repressor for DA neuronal specification in the sibling that receives active Notch signaling. Our study provides the first link of ACD and Notch signaling in the specification of a neurotransmitter phenotype in Drosophila. Given the high degree of conservation between Drosophila and vertebrate systems, this study could be of significance to mechanisms of DA neuronal differentiation not limited to flies.
Introduction
Asymmetric cell division (ACD) is a fundamental mechanism generating cell fate diversity during nervous system development . In Drosophila, progenitor cells delaminate from the neuroectoderm and start dividing along the apical-basal axis in a stem cell-like mode giving rise to another neuroblast (NB) and an intermediate precursor called ganglion mother cell (GMC) . During division, NBs localize proteins such as Inscuteable (Insc) and Bazooka (Baz) to the apical cortex and conversely, proteins such as Numb and Partner of Numb (Pon) to the basal cortex. Pon physically interacts with Numb and directs asymmetric localization of Numb . In general, the apical proteins or protein complexes control the localization of the basal proteins . GMC division is also asymmetric and results in two siblings with distinct cell fates .
Early in Drosophila embryonic development, the Notch pathway is instrumental in lateral inhibition, a process which singles out NBs from equivalent groups of neuroectodermal cells. During GMC divisions, Notch plays an active role in binary sibling cell fate specification. In this context, two opposing regulators of Notch, i.e. Numb and Sanpodo (Spdo) play important roles. While Numb antagonizes Notch signaling in one daughter cell , Spdo promotes Notch signaling in the other sibling resulting in differential activation of Notch signaling which ultimately generates two distinctly specified binary cell fates.
Drosophila midline cells arise from a group of mesectodermal cells which separate the mesodermal anlagen from the lateral neurogenic region. Midline cells are characterized by the expression of Single-Minded (Sim), the master regulator of ventral midline development . Midline precursors (MP) express unique or overlapping sets of marker genes and normally divide only once giving rise to two daughter cells.
DA neurons play a fundamental role in health and disease and their loss has been implicated in Parkinson's disease (PD) . In Drosophila, DA neurons also have roles in controlling behavior, learning and memory . A hallmark of DA neurons is the expression of Tyrosine Hydroxylase (TH), a rate limiting enzyme in dopamine synthesis and as such TH is commonly used as DA neuronal marker. Although DA neurons in Drosophila are known to be present from mid-embryonic development to adulthood, so far mechanistic insights came mostly from studying a single DA neuron derived from the ventral midline . However, the majority of DA neurons in Drosophila are of nonmidline origin and it has not been demonstrated whether similar mechanisms would apply for DA neuronal specification derived from these neuroblast lineages.
Here, we investigated the developmental origin and molecular mechanisms governing the specification of embryonic and larval DA neurons. We found that ACD and Notch signaling are crucial mechanisms for specifying DA neurons. In this context, Notch signaling represses DA neuronal fate or in other words DA neurons differentiate from cells without active Notch signaling. Our study provides for the first time a link between ACD, Notch signaling and DA neuron specification in Drosophila. Studying the cellular and molecular mechanisms of DA neuron specification in Drosophila might provide useful insights into vertebrate systems which could ultimately support strategies for controlling in vitro cell fate specification of DA neurons.
Lineage analysis
The lineages and projection patterns of TH-positive neurons were traced using the flip-out technique . Briefly, embryos were collected for 3 hours from a cross between flies of genotypes P{UAS-mCD8::GFP.L}LL4, P{hsFLP}22, y 1 w * ; Pin 1 /CyO and w 1118 ; P{AyGAL4}25/CyO. This was followed by three hours of aging, after which a 10-minute heat shock (32°C) was applied to generate GFP positive clones in flies carrying both the flippase and FRT components. For lineage analysis in the embryos, heat treated embryos were further aged at 18°C until end of embryogenesis before fixation while for lineage analysis in the larva brain, heat treated embryos were aged at 18°C until third larval instar stage followed by fixation. Clones which were positive for both GFP and TH immunoreactivity were then analyzed and compared to published lineage data .
MARCM and siRNA knock-down analysis
For the clonal analysis, numb and insc mutations were recombined into the respective FRT chromosomes. Individual cross was set up between the MARCM ready strain and the strain carrying a mutation and an FRT site on same chromosome. Clones were induced between 4 to 7 hours (at 25°C) of embryonic development by heat shocking the embryos for 30 minutes at 37°C as described . Heat treated embryos were further aged at 25°C until third larval instar stage when larval brains were dissected. Clusters containing GFP marked clones were analyzed for TH expression. For the control experiments, crosses were set up between the MARCM ready strains and the strains containing FRT sites followed by analysis of TH expression. For siRNA knock-down analysis, UAS-siRNA baz was expressed using a ubiquitous driver, Da-GAL4. Third larval instar brains were dissected and analyzed for TH expression.
Conditional Notch knock-down experiments
For the analysis of Notch functional requirement in the embryonic H-cells, Notch embryos were collected for 1 hour at permissive temperature (18°C), aged for 5/7/9 hours at 18°C and exposed to restrictive temperature (29°C-30°C) in a water bath for 2 hours. Samples were further aged at 18°C until embryonic stages 16-17 before fixation. In a separate experiment, five-hour aged embryos were exposed to restrictive temperature continuously until embryonic stages 16-17 and analyzed. Dissection of period of Notch requirement for neuronal fate specification of the larval DA neurons was carried out by segregating out embryos of different developmental stages as well as early first larval instar and exposed them separately to restrictive temperature for 2 hours, shifted back to permissive temperature until third larval instar stage when the larval brains were dissected and analyzed. For the controls, embryos were collected from N ts flies which were constantly grown at 18°C.
Dopaminergic neurons in the embryonic and larval CNS
We began our analysis by examining the developmental profile of major clusters of DA neurons from embryonic until third larval instar stages. Tyrosine Hydroxylase (TH), encoded by the Drosophila pale (ple) gene is a rate limiting enzyme for the synthesis of neurotransmitter dopamine and therefore is widely used as a phenotypic marker for DA neurons. Using a-TH antibody as well as ple-GAL4 ; UAS-GFP reporter system we analyzed TH expression from embryonic to larval stages of development. In the embryo, TH protein expression was first observed at stage 14 (St14) in one neuron per segment in the ventral midline representing the H-cell . At St15, additional TH expression was observed weakly in a single neuron per hemineuromere at the dorsal lateral positions as well as in two paramedial cells neighboring the H-cells. By St17, TH expression became much more prominent in cells of the VNC; in 12 midline H-cells spanning the anterior-most suboesophageal segment-1 to the posterior-most abdominal-8 (A8) segment, in one cell per hemineuromere at dorsal lateral positions and transiently in two small paramedial (PM) cells flanking the H-cells ( Figure 1A, see also ). The two PM cells at suboesophageal segment-2 and thoracic segment-1 (T1) were slightly bigger in size than the other PM cells at T2 to A8. In agreement with a previous report , we observed TH expression in the embryonic brain lobes at St17 although expression was weak and in only a subset of cells normally labeled in late larval stages.
The H-cell is derived from the MP3 midline progenitor which produces the dopaminergic H-cell and its glutaminergic sibling, Hcell sib . Midline origin of H-cell was confirmed by the expression of b-Galactosidase under the regulation of Sim-GAL4, a midline specific driver (Fig. 1B) and the loss of H-cells in sim mutants (Fig. 1C). Neither the paramedial nor the dorsal lateral DA neurons were of midline origin as they did not co-express Sim-lacZ and were unaffected in sim mutants (Fig. 1B, C).
Embryonic TH expression persisted into larval stages. In the midline of third larval instar VNCs, H-cells could be found at the suboesophageal segments (SM1 and SM2), thoracic segments (TM1 to TM3) and abdominal segments (AM1 to AM7) ( Fig. 1D; nomenclature according to Selcho et al. ). The two cells directly neighboring SM2 and TM1 were referred to as suboesophageal mediolateral (SML2) and thoracic mediolateral (TML1) DA neurons. Nine strongly TH expressing cells could be found on each side at the lateral positions: two at the suboesophageal regions (SL1 and SL2) (Fig. 1D) and seven at the thoracic and abdominal regions (DL1-DL7) (Fig. 1D). We also observed an additional TH expressing cell at each dorsal lateral position between DL5 and DL6 (Fig. 1E, E9) as well as two THpositive cells at the posterior-most tip of the VNC (Fig. 1 F, F9), however they were not analyzed further due to relatively weak TH expression levels.
In the central brain of third larval instar, four major groups of DA neurons were previously reported spanning dorsal medial (DM) and dorsal lateral (DL) positions of each brain hemispheres . Recently, a report described the projection patterns of individual neurons within each clusters in greater detail and it seemed that although individual neurons within each clusters shared fasciculation pattern, they might not share similar projection patterns . For simplicity and ease of further analysis, we regrouped the DA neurons into a cluster based on level of proximity to one another and similarity in axonal fasciculation patterns (also as an indication for a possible lineage relationship) (Fig. S1). Using this approach, we regrouped the DA neurons into 6 clusters per hemispheres. Three clusters of neurons were clearly separated from one another at dorsal medial positions and neurons in these clusters generally projected ipsilaterally: DM1a, which contained a single neuron; DM1b which contained three neurons and DM2 which contained four neurons. The three distinct clusters in the dorsal lateral positions were: DL1 which contained seven tightly grouped neurons having axonal projection across the midline to the contra-lateral side of the brain hemisphere; DL2a which consisted of four neurons, some of which arborized on both sides of the brain but some remained strictly at the ipsilateral side of the brain. The DL2b cluster consisted of two neurons projecting towards the midline and after crossing the midline terminated on the contra-lateral side of the brain where they also showed some ipsilateral and descending projections to the suboesophageal neuromeres ( Fig. 1G and Fig. S1; see also Selcho et al. ).
All larval DA neurons did not express Neurotactin (Fig. 1H), a marker that specifically labels secondary neurons born only during larval neurogenesis . This clearly demonstrated that DA neurons in the larval hemispheres were born during embryonic development. Although it was reported that DA neurons in the larval hemispheres already expressed TH at the end of embryogenesis (St17) , individual clusters of DA neurons were not studied in greater detail. At early first larval instar (L1) the composition of all the DA neuronal clusters was almost complete with the exceptions of the DA neuron in DM1a as well as one neuron in the DL2a cluster which still did not express TH at L1 (Fig. 1I). By second larval instar however, the compositions of all six DA neuronal clusters were indistinguishable from those seen in third larval instar.
In summary, DA neurons in Drosophila were present at the embryonic VNC in segmental patterns as one cell per neuromere in the midline and generally one cell per hemineuromere in the paramedial as well as dorsal lateral positions. In the larval brain, a total of 76 dopaminergic neurons which strongly expressed TH and were mostly of embryonic origin could be found. The VNC contained a total of 34 DA neurons of which 12 were H-cells of midline origin and spanned the VNC from anterior to posterior. In the suboesophageal region, there were two neurons at the mediolateral positions (SML2) and four DA neurons at the lateral positions (SLs). Two neurons could be found at mediolateral positions in the first thoracic segment (TML1) and fourteen DA neurons at the dorsal lateral positions (DLs) in the abdominal segments. Both larval hemispheres contained a total of 42 DA neurons (2621 cells) organized in six distinct clusters based on their physical proximity and axonal fasciculation patterns.
Analysis of NB lineage context of DA neurons in the VNC
Except for the H-cells, information regarding the lineage context of other embryonic DA neurons in the VNC is unavailable. Hence, we performed flippase-induced mitotic recombination to generate GFP-labeled NB clones which were co-labeled with TH to determine the NB lineages giving rise to the VNC DA neurons. By comparing such flippase induced NB clones with published lineage data , we found four clones consisting of an average of 3 to 4 inter-neurons located ventrally to the neuropile which also contained the paramedial DA neurons ( Fig. 2A). Two such clones were at the abdominal segment-1 (A1, data not shown) consisting of 3 GFP marked interneurons which fasciculated and projected as a single bundle towards the ventral midline and extended their neurites contralaterally across the posterior commissures. Two examples of clones in A4 ( Fig. 2A, A9) also consisted of 3 GFP marked interneurons which fasciculated together but bifurcated at the contra-lateral connectives. From its small lineage, position within the ventral nerve cord as well as unique projection patterns of neurons, we thus conclude that the paramedial DA neurons are progeny of NB5-1 .
We also obtained abdominal clones which contained the dorsal lateral DA neurons. However, due to late appearance of TH expression in these cells, we could not assign them unambiguously to a particular NB. Those clones consisted of 2 to 3 GFP marked neurons with axons projecting towards the midline but bifurcated before reaching the midline. Also, a subperineural glia (SPG) was part of the clone. Such features are typical of the NB5-6A lineage . Analysis of clones in the larva further suggested that the dorsal lateral DA neurons arose from small lineages of 2-3 cells (Fig. 2B, B9; see also Table 1). To further confirm lineage identity of the paramedial and dorsal lateral DA neurons to row 5 NBs, we analyzed whether DA neurons co-expressed Wingless-LacZ (Wg-LacZ) or Huckebein-LacZ (Hkb-LacZ) as Wg-LacZ is known to be expressed in row 5 NB and within row 5, Hkb-LacZ expression is limited to NBs 5-4 and 5-5 NBs . We found that both the paramedial and the dorsal lateral DA neurons expressed Wg (Fig. 2C, C9) but not Hkb (data not shown). Expression of Wg-LacZ by the dorsal lateral DA neurons suggested that these neurons were derived from row 5 NBs and of these NB5-1, NB5-2, NB5-3 and NB5-6 do not express Hkb-LacZ. As NB5-1, NB5-2 and NB5-3 are located close to the ventral midline while the lateral DA neurons are located most laterally, they are thus possibly derived from NB5-6. For technical reasons, lineage analysis of DA neurons in the suboesophageal and first thoracic neuromeres was carried out in the larva. We found that SML2 (Fig. 3A) and SL neurons (Fig. 3B, C) were derived from small NB lineages which only contained 3 to 4 neurons and 2 to 3 neurons, respectively (see Table 1). However, due to the increased complexity of axonal projection patterns of larval VNC neurons, we were unable to assign these groups of DA neurons to any known and described NB lineages. 
Thus, the DA neurons in the fly embryonic ventral nerve cord are derivatives of the midline progenitor MP3, NB5-1, possibly the abdominal variant of NB5-6 (NB5-6A) and two other yet to be identified small NB lineages.
Role of asymmetric cell division in the specification of DA neurons
A role of a basally localized asymmetric component such as Numb in fate specification of daughter cells derived from the midline precursor cells (MPs) was recently reported . To extend on these findings, we first analyzed asymmetric localization of an apical complex protein Inscuteable (Insc) and a basal complex protein Partner-of-Numb (Pon) in MPs of St10-11 embryos. We found that Insc was generally expressed at the apical cortex of MPs throughout MP divisions ( Fig. 4A-F). Reversely, Pon was observed at the basal cortex of MPs from prophase to telophase when it was specifically distributed to the basal daughter cells following cytokinesis (Fig. 4M-R). The localization patterns of Insc and Pon in MPs as well as their apical-basal polarity were similar to those seen in the NBs (Fig. 4G-L and Fig. 4S-X, respectively), suggesting that MPs divide asymmetrically in a similar manner as described for NBs. There were a few exceptions in which NBs did not express Insc though they divided perpendicular to the cell surface. As Insc restricts proteins such as Numb to the basal cortex of dividing NBs or GMCs, its absence may result in distribution of Numb to both daughter cells hence resulting in equalized daughter cell fates. Symmetric division has been reported for the MP1 . In addition, we also observed MPs which divided parallel to the cell surface and hence would likely distribute Insc to both daughter cells.
To investigate whether DA neurons were affected when the asymmetric cell division machinery was disturbed, we analyzed TH expression in numb and inscuteable (insc) mutant embryos. In wild type, a single H-cell was found in each neuromere at the ventral midline (100%60, n = 96 neuromeres, Fig. 5A). While in numb mutants H-cells were not observed in most neuromeres (78%63.7, n = 125; Fig. 5D), in insc mutants they were duplicated at high frequency (73.6%63.5, n = 186 neuromeres; Fig. 5G). To further support a sibling cell fate transformation, we analyzed the expression pattern of Period (Per) which is expressed by the H-cell sibs, the iVUMs as well as other non-midline derived cells (Fig. 5B) . We found that the loss and gain of H-cells in numb and insc mutant embryos were accompanied by the reverse changes in the number of Per expressing cells at medial positions ( Fig. 5E and H, respectively). These data suggested a fate transformation between H-cell and its sibling involving asymmetric cell division. Cell fate transformation in numb and insc mutations were also observed for the other VNC DA neurons and their siblings such as the dorsal lateral DA neurons, which were mostly lost in numb (Fig. 5F) and duplicated in insc although duplication was at a low frequency (14.2%63, n = 201 hemineuromeres; Fig. 5I). We also analyzed the paramedial DA neurons and found that they were generally unaffected in insc mutants. By analyzing clear TH-expressing paramedial DA neurons in late staged embryos which also concurrently showed duplicated H-cells, we observed 1 THpositive cell in most hemineuromeres (99.2%60.6, n = 240, Fig. 5G). However, these cells were generally absent in numb mutants (Fig. 5D).
More than half of larval DA neurons are found in the central brain hemispheres. Therefore, we proceeded to study the involvement of ACD in fate specification of these neurons. As most mutations affecting asymmetric cell division (ACD) are embryonic lethal, we took two alternative approaches: firstly, we generated MARCM labeled mutant clones and secondly, we knocked-down gene function using the siRNA approach . MARCM clones were generated for insc and numb mutations as well as their respective FRT strains which served as controls and the DA clusters containing GFP-labeled clones were analyzed for the total number of TH expressing cells (see Table 2). We found that generally the numbers of DA neurons increased when DA clusters were part of insc MARCM clones. For example, we found that DM1b contained 4 TH-positive cells (n = 2) (Fig. 6A, B), DL1 which contained an average of 11.5 TH-positive cells (n = 4) (Fig. 6C, D) and DL2a which contained 5 TH-positive cells (n = 1) ( Table 2). We were unable to obtain insc clones that overlapped with DM1a, DM2 and DL2b clusters. Reversely, when numb clones were found in the vicinity or overlapped partly with DA clusters, less DA neurons were observed; e.g. the DM1a cells were absent (n = 2), DM1b which comprised of an average of 1.5 TH-positive cells (n = 4) (Fig. 6E-H), DL2a which comprised of an average of 2.7 TH-positive cells (n = 3) and DL2b which comprised of 1 TH-positive cell (n = 1) ( Table 2). It needs to be noted that due to technical limitations it was not possible to unambiguously link the GFP-positive mutant cells to missing TH+ neurons. However, our conclusion that numb affects larval DA neuron specification is based on the observation that the number of DA neuron was reduced in the DM1b clusters when the GFPpositive mutant cells were part of or in close proximity with the other wild type TH+ cells in these clusters. 
In addition, the mutant GFP+ cells occupied a position where the missing DA neurons were expected to be located, suggesting that the GFP-positive cells could represent the missing TH-positive cells. A role for numb in larval DA neuron specification was also supported by the finding that in embryos of numb mutants DA neurons were largely missing (see above). We did not obtain numb clones that overlapped with DM2 and DL1 clusters. We also examined sanpodo Table 2). In the VNC, the SML2 (Fig. S2B, B9) and TML1 (Fig, S2C, C9) cells were duplicated in spdo mutant clones (n = 4). Ubiquitous functional knock-down of bazooka (baz) using da-GAL4 also resulted in an increase of TH-expressing cells in DM1b (Fig. 7B), DM2 (Fig. 7D) and DL2b clusters (Fig. 7F)
Notch suppresses DA neuronal specification
Notch has been described as an effector of asymmetric cell division and binary sibling cell fate resolution . To investigate the role of Notch signaling in the specification of embryonic DA neurons, we first analyzed TH expression in the ventral midline of Notch 55e11 mutants and found supernumerary TH expressing cells. On average, we detected 6.6 ± 0.2 H-cells in each segment (n = 93; Fig. S3B). The dorsal lateral DA neurons were also affected and on average 2.9 ± 0.2 TH-positive cells were found per hemineuromere (n = 132, Fig. S3B). In the embryonic ventral midline, the Period protein is expressed in H-sibs and iVUMs (Fig. S3C). Hence, we investigated if the increase of H-cells was accompanied by a reduction of Period expressing H-sibs. We found that loss of Notch resulted in complete loss of Period expression in all cells (Fig. S3D).
Previously, it was reported that disruption of the Notch signaling pathway caused transformation of some MPs towards MP3 fate which then resulted in supernumerary H-cells . Although the phenotype indicated a role of Notch on MP3 fate specification, a role of Notch on binary cell fate specifications has not been clearly demonstrated and not much is known about the temporal requirement for Notch in MP3 fate and sibling cell fate specifications. Therefore, we dissected the two roles of Notch with a conditional knock-out approach using the Notch temperature sensitive allele (N ts1 ). A typical midline progenitor with the exception of the midline neuroblast (MNB) divides only once at St8 when the eight midline progenitors per segment generate about 16 midline cells including the H-cell and the H-cell sib neurons . Generally, embryos were collected for 1 h and further grown at 18 °C for specific periods (5 h, 7 h or 9 h), exposed to non-permissive temperature for 2 h to remove Notch function at particular stages and were then reared until St17 with Notch function restored (Fig. 8A). When Notch function was removed for 2 h after 5 to 6 h of embryonic development at permissive temperature (∼St8), which was developmentally much earlier than the division of MPs during St10-11 when binary post-mitotic siblings were generated, 12.4% of neuromeres (n = 97) showed duplicated H-cells and 1% of segments contained more than 2 H-cells ( Fig. 8C; Table 3). This suggested that removal of Notch at this developmental period possibly affected the process of MP specification and loss of Notch resulted in additional MP3s. The dorsal lateral DA neurons were also duplicated ( Fig. 8C) in about 23% of the hemineuromeres (n = 165) whereas the paramedial cells were found to be either duplicated (15%) or triplicated (2%) (n = 138 hemineuromeres) (Fig. 8C). 
When Notch function was removed for two hours after 7 to 8 hours of embryonic development (∼St9 to early St10), 1 H-cell (26%), 2 H-cells (57%), 3 H-cells (12%) and 4 H-cells (4%) (n = 298 neuromeres) were observed ( Fig. 8D; Table 3). The high frequency of H-cell duplication suggested that binary cell fate specification involving Notch signaling in the MP3 lineage was disturbed although the presence of more than two H-cells suggested that at this same time point MP3 specification was also affected. Removal of Notch at these stages also resulted in additional paramedial (PM) cells: 1 PM (19%), 2 PMs (50%), 3 PMs (29%) and 4 PMs (2%) (n = 238 hemineuromeres). When Notch function was removed for 2 hrs after 9 to 10 hours of embryonic development (∼early St10 to St11), 1 H-cell (18%), 2 H-cells (71%), 3 H-cells (6%) and 4 H-cells (5%) were observed (Fig. 8E and Table 3). The high frequency of duplicated H-cells in conjunction with the lower number of neuromeres having more than two H-cells suggested that at this developmental phase Notch was predominantly affecting binary sibling cell fate specification. The paramedial cells were also affected with 1 PM (28%), 2 PMs (47%), 3 PMs (20%) and 4 PMs (4%). However, removal of Notch at St9-11 of embryonic development did not seem to affect the dorsal lateral DA neurons (Fig. 8D, E and Table 3), suggesting that at this developmental phase Notch was not required for the specification of these neurons. Thus, our data suggested temporal requirement for Notch on MP3 specification and/or maintenance of MP3 fate suppression at around St8 to early St10 and a requirement for Notch in binary sibling cell fate specification at St9-11. Conversely, when we over-expressed the constitutively active intracellular domain of Notch (N intra ) in midline cells using sim- Gal4, we found a complete loss of DA neurons (Fig. S4B, E). Similar to a previous report by Wheeler et al. 
, over-expression of Numb with sim-Gal4 resulted in duplication of H-cells (Fig. S4C, F). SML2 and TML1 (arrows in Fig. S4B, C) were not affected in these experiments, suggesting that these cells were not of midline origin. As the additional TH expressing cells in the midline phenocopied the projection pattern of the H-cell (data not shown), complete transformation of H-sib into H-cell was likely. Loss of Hcells in the embryo following over-activation of Notch signaling was also accompanied by loss of the corresponding axonal projections.
To study Notch function as well as its temporal requirement for DA neuronal specification in the larval hemispheres, we used the conditional N ts allele to remove Notch function for two hours at different developmental stages (i.e. at St9-11, St13 and St16-17 of embryonic development as well as early first larval instar) and analyzed the number of TH expressing cells in third larval instar hemispheres. Removing Notch function at St13 onwards till larval stage had very little effect on DA fate specification in the larval hemispheres and only the DM1b cluster was affected with 8.3% of clusters having increased numbers of TH-positive cells (n = 24). However, when Notch function was removed during St9-11, DA fate specification was more broadly affected. From the 40 hemispheres investigated, we found a general increase of TH expressing cells in the following clusters: DM1a (17.5% with 2 cells, 15% with more than 2 cells), DM1b (53% >3 cells), DM2 (not affected), DL1 (10% >7 cells), DL2a (5% >4 cells) and DL2b (15% >2 cells) ( Fig. 9; Table 4). To confirm that the additional cells indeed represented sibling cell fate transformations, we traced the axonal fasciculation and projection patterns of all TH-expressing cells and found that all cells in these clusters exhibited patterns comparable to the normal cells in these positions (data not shown).
In the larval VNC, increased number of cells were also seen in SM1 (10%), SM2 (32%), TM1 (26%), TM2-A7 (11%) and dorsal lateral cells (2%). Although we cannot rule out the possibility that the phenotypes we observed in the larval central brains were also due to mild neurogenic defects, we suggest that the observed additional cells were caused by defects in binary cell fate specification as the VNCs of the same larva in general did not show detectable neurogenic phenotypes. In addition, the fact that duplication of whole clusters was rarely seen suggests that removal of Notch at St9-11 largely affecting binary cell fate choice rather than generation of NBs.
Taken together, our data strongly indicate that Notch signaling controls fate specification of most if not all DA neurons in Drosophila.
Discussion
DA neurons in Drosophila have been shown to play roles in behavior as well as learning and memory . While quite recently the roles of asymmetric cell division (ACD) and Notch signaling have been demonstrated for the specification of the ventral midline derived H-cell, not much is known about the mechanisms of specification for the majority of DA neurons which are derived from the lateral and procephalic neuroectoderm. Here, we demonstrate that asymmetric cell division and Notch signaling are both required for the specification of most if not all DA neurons in Drosophila. Our data provides further insight into the genesis and mechanisms involved in the specification of DA neurons in general possibly with relevance to vertebrate systems.
Larval DA neurons are born during embryonic neurogenesis
Our analysis indicates that although the majority of larval DA neurons are fully developed or matured at first larval instar, they are born and specified during embryonic neurogenesis. This is supported by a number of observations: firstly, the majority of DA neurons are already present at early first larval instar, a stage with very limited larval neurogenesis as most neuroblasts are in a state of quiescence . Secondly, larval DA neurons do not express Neurotactin, a specific marker for secondary neurons born during larval neurogenesis. Thirdly, dissection of temporal requirement of Notch clearly shows that Notch is required during embryonic St9-11 for DA neuronal specification (see also below). Strikingly, although DA neurons are born and specified during mid-embryogenesis, the expression of TH protein in most DA neurons can only be detected at late embryonic to early larval stages. The cause of the delay between cell fate specification and neurotransmitter maturation of DA neurons is not understood and will require further analysis. It is possible, however, that mechanisms involving post-translational regulation of TH, e.g. via microRNAs (miRNA) are playing a role in this context as the Drosophila TH mRNA contains multiple predicted miRNA binding sites in its 3′ untranslated region . DA neuronal specification requires asymmetric cell division Asymmetric cell division (ACD) is a major mechanism for generating cell type diversity in development . However, it has not been shown whether ACD as a general mechanism contributes to the specification of DA neurons in Drosophila. Our analysis of asymmetric protein localization patterns in the ventral midline supports previously published result . We found that most MPs localized Insc and Numb asymmetrically suggesting that the intrinsic machinery responsible for cell polarity during cell division is also observed for MPs. 
Functional analyses of mutations revealed a role for ACD in the specification of DA neurons including those outside of the ventral midline. We found that removal of insc, baz as well as numb and spdo affects DA neuronal specification in general. While removal of apical complex proteins generally results in additional DA neurons, the reverse phenotype is observed when basal components are removed. However, in insc mutant a less frequent duplication of the dorsal lateral DA neurons in the VNC is observed. Also, specification of the paramedial (PM) DA neurons is unaffected in insc mutants. This suggests that Insc may either be partially redundant or may not be required for specification of PM DA neurons. A partial redundancy of Insc has been described for the specification of MP2 cells as well as in late born cells within some NB lineages .
Specification of DA neurons in the larval brain hemispheres also requires ACD. insc, numb and spdo mutant clones generated during embryogenesis or da-GAL4; baz siRNA knock-down result in altered numbers of DA neurons in the majority of larval DA clusters. In insc and spdo clones as well as when baz is knocked down, DA clusters generally contain more DA neurons whereas in numb clones less DA neurons are observed. However, in insc and baz mutants, we rarely observed duplication of the whole DA clusters. An explanation could be that DA neurons in each cluster are not originated from the same lineage or insc and baz mutations only affected certain lineages but not others. Alternatively, Insc is only strictly required in cells born early in the NB lineages hence later born DA neurons within the same clusters are not affected resulting in less than double the size of neurons in these clusters. Differential requirement for insc based on the birth order of cells in NB lineages has been demonstrated in some embryonic NB lineages . Collectively, our data indicates a critical role of the ACD mechanism in fate specification of most DA neurons in Drosophila.
Notch is required for the specification of DA neurons
We and others have shown that the midline gives rise to one DA neuron per segment called the H-cell which derives from the MP3. Consistent with Wheeler et al. , we found approximately 6-7 TH+ cells in the ventral midline of Notch mutants. This number of cells is, however, inconsistent with a sole role of Notch in binary sibling cell fate specification of the MP3. It was suggested that Notch is required in differential fate specification of MPs and that in Notch additional MPs acquire MP3 cell fate . Thus, the role of Notch could be to repress other MPs to adopt an MP3 fate. Such a repressive role on neighboring cells is similar to the role of Notch in repressing ectodermal cells to take on a NB fate during the process of lateral inhibition . However, not much is known about the differential temporal requirement of Notch for MP specification and binary cell fate specification. Our temporal analysis using a conditional Notch allele revealed two possible overlapping phases of Notch requirement in the midline. Removal of Notch during St10-11 generated predominantly two H-cells. This timing is consistent with the MP divisions occurring at late St10 to St11 , also stages when binary sibling cell fate specification involving Notch signaling is normally taking place. When Notch function was removed at St9 of embryonic development, 17% of segments were found to contain between 3 to 5 H-cells. We are unable to completely rule out a possible overlap between the requirement for Notch during MP specification and binary sibling cell fate specification, but our data suggests that Notch requirement for MP3 specification begins at St8 and extends into stage 9-10 of embryonic development.
Further to that, we have shown that loss of Notch also results in supernumerary DA neurons outside of the midline. For example, conditional removal of Notch function resulted in additional dorsal lateral and PM DA neurons. Thus, our data reveals a general role of Notch in the specification of DA neurons. Further support for a critical role of Notch in this context comes from our analysis of numb and spdo mutants. Numb is described as a repressor of Notch . Consequently, we found that numb loss of function mutation results in reduced number of DA neurons possibly due to an upregulation of Notch function. spdo has been reported to potentiate Notch signaling in the cells that lack the Numb protein . As a result, we found additional DA neurons in clones lacking spdo function. Therefore, data obtained from studying Notch and the two important Notch regulators Numb and Spdo clearly disclose an essential role for Notch signaling in the specification of most if not all DA neurons in the fly.
Our study reveals a repressive function of Notch in the specification of DA neurons as the cell with active Notch signaling is normally not specified as DA neuron whereas the cell lacking or repressing Notch signal differentiates into DA neuron. Interestingly, a repressive role for Notch on DA neuronal specification is also observed in the frog spinal cord . It was also reported recently that loss of Notch signaling in the zebrafish led to expansion of cell numbers of DA neurons during development . Previous studies done in Drosophila CNS reported a role of Notch on the specification of a subset of DA neurons in the embryo . These reports in conjunction with our data support the notion of Notch as a repressor for DA neuronal specification and at the same time suggest a conserved role of Notch in the specification of DA neurons in flies, vertebrates and arthropods. In conclusion, our data clearly demonstrate that the genesis of DA neurons in the fruit fly requires asymmetric cell division and Notch signaling. Thus, it is very likely that DA neuronal specification in the fly follows a common mechanism requiring the repression of Notch signaling. It also remains to be determined whether Notch represses TH fate directly or indirectly through currently unidentified Notch target genes. The identification of genes which are actively involved in the specification of DA neurons will shed further insights on the general mechanism of DA neuronal specification possibly not limited to the fly system. Examples of third larval instar brains of N ts , shifted to restrictive temperature at St9-11, showing duplication of TH-positive neurons at DM1a (both brain hemispheres, C), DL1 (right brain hemisphere, D), DM (both brain hemispheres, E) and DL1 and DL2 (right and left brain hemispheres, respectively, F). doi:10.1371/journal.pone.0026879.g009 Table 4. Temporal analysis of Notch requirement for the specification of DA neurons in the larval central brain. Supporting Information
|
def install_nvm(host: Driver) -> None:
    """Install nvm (Node Version Manager) on the remote host, idempotently.

    Steps:
      1. Skip entirely if ``~/.nvm/nvm.sh`` already exists.
      2. Ensure ``curl`` is installed (apt on ubuntu, pacman on arch;
         best-effort otherwise).
      3. Run the official nvm v0.36.0 install script.
      4. Source nvm once in the current session so later provisioning
         steps can use it.
      5. Append the nvm init snippet to the user's shell rc file, but
         only if the file does not already reference ``NVM_DIR``.
    """
    # Idempotence check: the presence of nvm.sh means nvm is installed.
    stdout = host.exec("test -f ~/.nvm/nvm.sh > /dev/null; echo $?").stdout
    if stdout.decode().strip() == "0":
        return
    # The installer is fetched over HTTPS, so curl must be available.
    if host.platform == "ubuntu":
        apt_install(host, ["curl"])
    elif host.platform == "arch":
        pacman_install(host, ["curl"])
    else:
        print("Neither ubuntu nor arch, let's continue and hope we have curl")
    host.exec("curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.36.0/install.sh | bash")
    # Load nvm into the current shell session for subsequent steps.
    host.exec_as_script("\n".join(nvm_init_script))
    rc_file = get_shell_rc_file(host)
    if rc_file is not None:
        # grep returns non-zero when NVM_DIR is absent from the rc file.
        if host.exec(f"grep NVM_DIR {rc_file}").return_code != 0:
            # Bug fix: the heredoc below appends the WHOLE snippet, so it
            # must run exactly once. The original wrapped it in
            # `for i, line in enumerate(nvm_init_script)` without using
            # either loop variable, duplicating the block once per line.
            host.exec_as_script("\n".join([
                f"cat >> {rc_file} <<EOF",
                *nvm_init_script,
                "EOF",
            ]))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.